+++ /dev/null
-Makefile
-Makefile.in
+++ /dev/null
-Makefile.in
-Makefile
-config.h
-stamp-h.in
}
#endif
+static gmx_inline __m256
+gmx_mm256_unpack128lo_ps(__m256 xmm1, __m256 xmm2) /* concatenate the LOW 128-bit lanes: result = { lo(xmm2) | lo(xmm1) } */
+{
+ return _mm256_permute2f128_ps(xmm1,xmm2,0x20); /* imm 0x20: lane0 of xmm1 -> lane0, lane0 of xmm2 -> lane1 */
+}
+
+static gmx_inline __m256
+gmx_mm256_unpack128hi_ps(__m256 xmm1, __m256 xmm2) /* concatenate the HIGH 128-bit lanes: result = { hi(xmm2) | hi(xmm1) } */
+{
+ return _mm256_permute2f128_ps(xmm1,xmm2,0x31); /* imm 0x31: lane1 of xmm1 -> lane0, lane1 of xmm2 -> lane1 */
+}
+
+static gmx_inline __m256
+gmx_mm256_set_m128(__m128 hi, __m128 lo) /* build a 256-bit value: lo -> lane 0 (bits 0-127), hi -> lane 1 (bits 128-255) */
+{
+ return _mm256_insertf128_ps(_mm256_castps128_ps256(lo), hi, 0x1);
+}
+
+
+static __m256d /* NOTE(review): not marked gmx_inline, unlike the _ps variants above -- confirm intentional */
+gmx_mm256_unpack128lo_pd(__m256d xmm1, __m256d xmm2) /* double-precision twin of gmx_mm256_unpack128lo_ps: { lo(xmm2) | lo(xmm1) } */
+{
+ return _mm256_permute2f128_pd(xmm1,xmm2,0x20);
+}
+
+static __m256d /* NOTE(review): not marked gmx_inline, unlike the _ps variants above -- confirm intentional */
+gmx_mm256_unpack128hi_pd(__m256d xmm1, __m256d xmm2) /* double-precision twin of gmx_mm256_unpack128hi_ps: { hi(xmm2) | hi(xmm1) } */
+{
+ return _mm256_permute2f128_pd(xmm1,xmm2,0x31);
+}
+
+static __m256d /* NOTE(review): not marked gmx_inline, unlike the _ps variants above -- confirm intentional */
+gmx_mm256_set_m128d(__m128d hi, __m128d lo) /* build a 256-bit double vector: lo -> lane 0, hi -> lane 1 */
+{
+ return _mm256_insertf128_pd(_mm256_castpd128_pd256(lo), hi, 0x1);
+}
+
+
static void
+++ /dev/null
-Makefile.in
-Makefile
\ No newline at end of file
+++ /dev/null
-Makefile
-Makefile.in
+++ /dev/null
-Makefile
-Makefile.in
+++ /dev/null
-Makefile
-Makefile.in
+++ /dev/null
-Makefile
-Makefile.in
+++ /dev/null
-Makefile
-Makefile.in
+++ /dev/null
-Makefile
-Makefile.in
+++ /dev/null
-Makefile
-Makefile.in
+++ /dev/null
-Makefile
-Makefile.in
-stamp-h.in
-stamp-h
-config.h
-config.h.in
+++ /dev/null
-Makefile
-Makefile.in
-.deps
-.libs
\ No newline at end of file
+++ /dev/null
-Makefile
-Makefile.in
-.deps
-.libs
\ No newline at end of file
{
if(log!=NULL)
{
- fprintf(log,"WARNING! Binary not matching hardware - you are likely losing performance.\n\n");
+ fprintf(log,"\nBinary not matching hardware - you might be losing performance.\n"
+ "Acceleration most likely to fit this hardware: %s\n"
+ "Acceleration selected at GROMACS compile time: %s\n\n",
+ gmx_cpuid_acceleration_string[acc],
+ gmx_cpuid_acceleration_string[compiled_acc]);
}
- printf("\nWARNING! Binary not matching hardware - you are likely losing performance.\n"
- "Acceleration most likely to fit this hardware: %s\n"
- "Acceleration selected at GROMACS compile time: %s\n\n",
- gmx_cpuid_acceleration_string[acc],
- gmx_cpuid_acceleration_string[compiled_acc]);
+ printf("Compiled acceleration: %s (Gromacs could use %s on this machine, which is better)\n",
+ gmx_cpuid_acceleration_string[compiled_acc],
+ gmx_cpuid_acceleration_string[acc]);
}
-
return rc;
}
file(GLOB NONBONDED_SSE2_SINGLE_SOURCES nb_kernel_sse2_single/*.c)
endif()
-# For now we enable the (existing) SSE4.1 kernels for all higher accelerations
-if(((GMX_CPU_ACCELERATION STREQUAL "SSE4.1") OR
- (GMX_CPU_ACCELERATION STREQUAL "AVX_256")) AND NOT GMX_DOUBLE)
+if(GMX_CPU_ACCELERATION STREQUAL "SSE4.1" AND NOT GMX_DOUBLE)
file(GLOB NONBONDED_SSE4_1_SINGLE_SOURCES nb_kernel_sse4_1_single/*.c)
endif()
file(GLOB NONBONDED_AVX_128_FMA_SINGLE_SOURCES nb_kernel_avx_128_fma_single/*.c)
endif()
+if(GMX_CPU_ACCELERATION STREQUAL "AVX_256" AND NOT GMX_DOUBLE) # AVX-256 nonbonded kernels exist only in single precision
+ file(GLOB NONBONDED_AVX_256_SINGLE_SOURCES nb_kernel_avx_256_single/*.c)
+endif()
+
# These sources will be used in the parent directory's CMakeLists.txt
-set(NONBONDED_SOURCES ${NONBONDED_SOURCES} ${NONBONDED_SSE2_SINGLE_SOURCES} ${NONBONDED_SSE4_1_SINGLE_SOURCES} ${NONBONDED_AVX_128_FMA_SINGLE_SOURCES} PARENT_SCOPE)
+set(NONBONDED_SOURCES ${NONBONDED_SOURCES} ${NONBONDED_SSE2_SINGLE_SOURCES} ${NONBONDED_SSE4_1_SINGLE_SOURCES} ${NONBONDED_AVX_128_FMA_SINGLE_SOURCES} ${NONBONDED_AVX_256_SINGLE_SOURCES} PARENT_SCOPE)
--- /dev/null
+/*
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2011-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ * As a special exception, you may use this file as part of a free software
+ * library without restriction. Specifically, if other files instantiate
+ * templates or use macros or inline functions from this file, or you compile
+ * this file and link it with other files to produce an executable, this
+ * file does not by itself cause the resulting executable to be covered by
+ * the GNU Lesser General Public License.
+ *
+ * In plain-speak: do not worry about classes/macros/templates either - only
+ * changes to the library have to be LGPL, not an application linking with it.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website!
+ */
+#ifndef _kernelutil_x86_avx_256_single_h_
+#define _kernelutil_x86_avx_256_single_h_
+
+#include "gmx_x86_avx_256.h"
+
+/* Transpose lower/upper half of 256-bit registers separately */
+#define GMX_MM256_HALFTRANSPOSE4_PS(ymm0,ymm1,ymm2,ymm3) { \
+ __m256 __tmp0,__tmp1,__tmp2,__tmp3; \
+ \
+ __tmp0 = _mm256_unpacklo_ps((ymm0),(ymm1)); \
+ __tmp1 = _mm256_unpacklo_ps((ymm2),(ymm3)); \
+ __tmp2 = _mm256_unpackhi_ps((ymm0),(ymm1)); \
+ __tmp3 = _mm256_unpackhi_ps((ymm2),(ymm3)); \
+ ymm0 = _mm256_shuffle_ps(__tmp0,__tmp1,_MM_SHUFFLE(1,0,1,0)); \
+ ymm1 = _mm256_shuffle_ps(__tmp0,__tmp1,_MM_SHUFFLE(3,2,3,2)); \
+ ymm2 = _mm256_shuffle_ps(__tmp2,__tmp3,_MM_SHUFFLE(1,0,1,0)); \
+ ymm3 = _mm256_shuffle_ps(__tmp2,__tmp3,_MM_SHUFFLE(3,2,3,2)); \
+} /* in-place 4x4 transpose performed independently within each 128-bit lane (unpack/shuffle never cross lanes) */
+
+
+static gmx_inline __m256
+gmx_mm256_calc_rsq_ps(__m256 dx, __m256 dy, __m256 dz) /* per-element squared distance: dx*dx + dy*dy + dz*dz */
+{
+ return _mm256_add_ps( _mm256_add_ps( _mm256_mul_ps(dx,dx), _mm256_mul_ps(dy,dy) ), _mm256_mul_ps(dz,dz) );
+}
+
+/* Normal sum of four ymm registers */
+#define gmx_mm256_sum4_ps(t0,t1,t2,t3) _mm256_add_ps(_mm256_add_ps(t0,t1),_mm256_add_ps(t2,t3)) /* pairwise adds keep the dependency chain short */
+
+
+static gmx_inline int
+gmx_mm256_any_lt(__m256 a, __m256 b) /* nonzero iff any element of a is less than the matching element of b */
+{
+ return _mm256_movemask_ps(_mm256_cmp_ps(a,b,_CMP_LT_OQ)); /* ordered quiet compare: NaN elements compare false */
+}
+
+
+static gmx_inline __m256 /* gather one float from each of four pointers into the low lane: { ? | d c b a }; upper lane undefined */
+gmx_mm256_load_4real_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
+ const float * gmx_restrict ptrC, const float * gmx_restrict ptrD)
+{
+ __m128 t1,t2;
+
+ t1 = _mm_unpacklo_ps(_mm_load_ss(ptrA),_mm_load_ss(ptrC));
+ t2 = _mm_unpacklo_ps(_mm_load_ss(ptrB),_mm_load_ss(ptrD));
+ return _mm256_castps128_ps256(_mm_unpacklo_ps(t1,t2)); /* cast leaves the upper 128 bits unspecified */
+}
+
+
+static gmx_inline __m256 /* gather one float from each of eight pointers: { h g f e | d c b a } */
+gmx_mm256_load_8real_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
+ const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
+ const float * gmx_restrict ptrE, const float * gmx_restrict ptrF,
+ const float * gmx_restrict ptrG, const float * gmx_restrict ptrH)
+{
+ __m256 t1,t2;
+
+ t1 = gmx_mm256_load_4real_swizzle_ps(ptrA,ptrB,ptrC,ptrD);
+ t2 = gmx_mm256_load_4real_swizzle_ps(ptrE,ptrF,ptrG,ptrH);
+
+ return _mm256_permute2f128_ps(t1,t2,0x20); /* 0x20: combine the two (defined) low lanes */
+}
+
+
+
+static gmx_inline void /* scatter elements 0-3 of the low 128-bit lane of xmm1 to the four pointers */
+gmx_mm256_store_4real_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
+ float * gmx_restrict ptrC, float * gmx_restrict ptrD, __m256 xmm1)
+{
+ __m256 t2,t3,t4;
+
+ t2 = _mm256_permute_ps(xmm1,_MM_SHUFFLE(1,1,1,1)); /* _mm256_permute_ps shuffles within each lane only */
+ t3 = _mm256_permute_ps(xmm1,_MM_SHUFFLE(2,2,2,2));
+ t4 = _mm256_permute_ps(xmm1,_MM_SHUFFLE(3,3,3,3));
+ _mm_store_ss(ptrA,_mm256_castps256_ps128(xmm1));
+ _mm_store_ss(ptrB,_mm256_castps256_ps128(t2));
+ _mm_store_ss(ptrC,_mm256_castps256_ps128(t3));
+ _mm_store_ss(ptrD,_mm256_castps256_ps128(t4));
+}
+
+
+static gmx_inline void /* scatter all eight elements of xmm1 to the eight pointers (A-D from low lane, E-H from high lane) */
+gmx_mm256_store_8real_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
+ float * gmx_restrict ptrC, float * gmx_restrict ptrD,
+ float * gmx_restrict ptrE, float * gmx_restrict ptrF,
+ float * gmx_restrict ptrG, float * gmx_restrict ptrH, __m256 xmm1)
+{
+ __m256 t1;
+
+ t1 = _mm256_permute2f128_ps(xmm1,xmm1,0x11); /* 0x11: copy the upper 128-bit lane into both lanes */
+
+ gmx_mm256_store_4real_swizzle_ps(ptrA,ptrB,ptrC,ptrD,xmm1);
+ gmx_mm256_store_4real_swizzle_ps(ptrE,ptrF,ptrG,ptrH,t1);
+}
+
+
+static gmx_inline void /* read-modify-write: add elements 0-3 of the low lane of xmm1 to the scalars at the four pointers */
+gmx_mm256_increment_4real_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
+ float * gmx_restrict ptrC, float * gmx_restrict ptrD,
+ __m256 xmm1)
+{
+ __m128 t1,t2,t3,t4;
+
+ t1 = _mm256_castps256_ps128(xmm1);
+ t2 = _mm_permute_ps(t1,_MM_SHUFFLE(1,1,1,1)); /* broadcast element 1 into position 0 */
+ t3 = _mm_permute_ps(t1,_MM_SHUFFLE(2,2,2,2));
+ t4 = _mm_permute_ps(t1,_MM_SHUFFLE(3,3,3,3));
+
+ t1 = _mm_add_ss(t1,_mm_load_ss(ptrA)); /* only the lowest element matters from here on */
+ t2 = _mm_add_ss(t2,_mm_load_ss(ptrB));
+ t3 = _mm_add_ss(t3,_mm_load_ss(ptrC));
+ t4 = _mm_add_ss(t4,_mm_load_ss(ptrD));
+
+ _mm_store_ss(ptrA,t1);
+ _mm_store_ss(ptrB,t2);
+ _mm_store_ss(ptrC,t3);
+ _mm_store_ss(ptrD,t4);
+}
+
+static gmx_inline void /* read-modify-write: add all eight elements of xmm1 to the scalars at the eight pointers */
+gmx_mm256_increment_8real_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
+ float * gmx_restrict ptrC, float * gmx_restrict ptrD,
+ float * gmx_restrict ptrE, float * gmx_restrict ptrF,
+ float * gmx_restrict ptrG, float * gmx_restrict ptrH,
+ __m256 xmm1)
+{
+ __m256 t1;
+
+ t1 = _mm256_permute2f128_ps(xmm1,xmm1,0x11); /* 0x11: copy the upper 128-bit lane into both lanes */
+
+ gmx_mm256_increment_4real_swizzle_ps(ptrA,ptrB,ptrC,ptrD,xmm1);
+ gmx_mm256_increment_4real_swizzle_ps(ptrE,ptrF,ptrG,ptrH,t1);
+}
+
+
+static gmx_inline void /* load four (c6,c12) LJ parameter pairs and transpose into per-parameter vectors (low lanes only) */
+gmx_mm256_load_4pair_swizzle_ps(const float * gmx_restrict p1, const float * gmx_restrict p2,
+ const float * gmx_restrict p3, const float * gmx_restrict p4,
+ __m256 * gmx_restrict c6, __m256 * gmx_restrict c12)
+{
+ __m128 t1,t2,t3,t4;
+
+ t1 = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p1); /* - - c12a c6a */
+ t2 = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p2); /* - - c12b c6b */
+ t3 = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p3); /* - - c12c c6c */
+ t4 = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)p4); /* - - c12d c6d */
+
+ t1 = _mm_unpacklo_ps(t1,t2); /* c12b c12a c6b c6a */
+ t3 = _mm_unpacklo_ps(t3,t4); /* c12d c12c c6d c6c */
+
+ *c6 = _mm256_castps128_ps256(_mm_shuffle_ps(t1,t3,_MM_SHUFFLE(1,0,1,0))); /* upper lanes of the outputs are undefined */
+ *c12 = _mm256_castps128_ps256(_mm_shuffle_ps(t1,t3,_MM_SHUFFLE(3,2,3,2)));
+}
+
+static gmx_inline void /* load eight (c6,c12) LJ parameter pairs into full-width per-parameter vectors */
+gmx_mm256_load_8pair_swizzle_ps(const float * gmx_restrict p1, const float * gmx_restrict p2,
+ const float * gmx_restrict p3, const float * gmx_restrict p4,
+ const float * gmx_restrict p5, const float * gmx_restrict p6,
+ const float * gmx_restrict p7, const float * gmx_restrict p8,
+ __m256 * gmx_restrict c6, __m256 * gmx_restrict c12)
+{
+ __m256 c6l,c6h,c12l,c12h;
+
+ gmx_mm256_load_4pair_swizzle_ps(p1,p2,p3,p4,&c6l,&c12l);
+ gmx_mm256_load_4pair_swizzle_ps(p5,p6,p7,p8,&c6h,&c12h);
+
+ *c6 = _mm256_permute2f128_ps(c6l,c6h,0x20); /* 0x20: combine the two defined low lanes */
+ *c12 = _mm256_permute2f128_ps(c12l,c12h,0x20);
+}
+
+
+static gmx_inline void /* load one xyz triplet, add the shift vector, and broadcast x, y, z each across all 8 elements */
+gmx_mm256_load_shift_and_1rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
+ const float * gmx_restrict xyz,
+ __m256 * gmx_restrict x1,
+ __m256 * gmx_restrict y1,
+ __m256 * gmx_restrict z1)
+{
+ __m128 t1,t2,t3,t4;
+
+ t1 = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz_shift); /* - - sy sx */
+ t2 = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz); /* - - y x */
+ t3 = _mm_load_ss(xyz_shift+2); /* - - - sz */
+ t4 = _mm_load_ss(xyz+2); /* - - - z */
+ t1 = _mm_add_ps(t1,t2);
+ t3 = _mm_add_ss(t3,t4); /* only the z element is needed */
+
+ t2 = _mm_permute_ps(t1,_MM_SHUFFLE(1,1,1,1)); /* broadcast shifted y */
+ t1 = _mm_permute_ps(t1,_MM_SHUFFLE(0,0,0,0)); /* broadcast shifted x */
+ t3 = _mm_permute_ps(t3,_MM_SHUFFLE(0,0,0,0)); /* broadcast shifted z */
+
+ *x1 = gmx_mm256_set_m128(t1,t1); /* duplicate into both 128-bit lanes */
+ *y1 = gmx_mm256_set_m128(t2,t2);
+ *z1 = gmx_mm256_set_m128(t3,t3);
+}
+
+
+static gmx_inline void /* load three packed xyz triplets (9 floats), add the shift vector to each, broadcast every coordinate across all 8 elements */
+gmx_mm256_load_shift_and_3rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
+ const float * gmx_restrict xyz,
+ __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
+ __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
+ __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3)
+{
+ __m128 tA,tB;
+ __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9;
+
+ tA = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz_shift); /* - - sy sx */
+ tB = _mm_load_ss(xyz_shift+2); /* - - - sz */
+
+ t1 = _mm_loadu_ps(xyz); /* first four packed coordinates */
+ t2 = _mm_loadu_ps(xyz+4); /* next four */
+ t3 = _mm_load_ss(xyz+8); /* ninth coordinate (z3) */
+
+ tA = _mm_movelh_ps(tA,tB); /* - sz sy sx */
+ t4 = _mm_permute_ps(tA,_MM_SHUFFLE(0,2,1,0)); /* shift rotated to match the packed xyzxyzxyz layout */
+ t5 = _mm_permute_ps(tA,_MM_SHUFFLE(1,0,2,1));
+ t6 = _mm_permute_ps(tA,_MM_SHUFFLE(2,1,0,2));
+
+ t1 = _mm_add_ps(t1,t4);
+ t2 = _mm_add_ps(t2,t5);
+ t3 = _mm_add_ss(t3,t6); /* only the lowest element (z3) is needed */
+
+ t9 = _mm_permute_ps(t3,_MM_SHUFFLE(0,0,0,0)); /* broadcast each shifted coordinate */
+ t8 = _mm_permute_ps(t2,_MM_SHUFFLE(3,3,3,3));
+ t7 = _mm_permute_ps(t2,_MM_SHUFFLE(2,2,2,2));
+ t6 = _mm_permute_ps(t2,_MM_SHUFFLE(1,1,1,1));
+ t5 = _mm_permute_ps(t2,_MM_SHUFFLE(0,0,0,0));
+ t4 = _mm_permute_ps(t1,_MM_SHUFFLE(3,3,3,3));
+ t3 = _mm_permute_ps(t1,_MM_SHUFFLE(2,2,2,2));
+ t2 = _mm_permute_ps(t1,_MM_SHUFFLE(1,1,1,1));
+ t1 = _mm_permute_ps(t1,_MM_SHUFFLE(0,0,0,0));
+
+ *x1 = gmx_mm256_set_m128(t1,t1); /* duplicate into both 128-bit lanes */
+ *y1 = gmx_mm256_set_m128(t2,t2);
+ *z1 = gmx_mm256_set_m128(t3,t3);
+ *x2 = gmx_mm256_set_m128(t4,t4);
+ *y2 = gmx_mm256_set_m128(t5,t5);
+ *z2 = gmx_mm256_set_m128(t6,t6);
+ *x3 = gmx_mm256_set_m128(t7,t7);
+ *y3 = gmx_mm256_set_m128(t8,t8);
+ *z3 = gmx_mm256_set_m128(t9,t9);
+}
+
+
+static gmx_inline void /* load four packed xyz triplets (12 floats), add the shift vector to each, broadcast every coordinate across all 8 elements */
+gmx_mm256_load_shift_and_4rvec_broadcast_ps(const float * gmx_restrict xyz_shift,
+ const float * gmx_restrict xyz,
+ __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
+ __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
+ __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3,
+ __m256 * gmx_restrict x4, __m256 * gmx_restrict y4, __m256 * gmx_restrict z4)
+{
+ __m128 tA,tB;
+ __m128 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12;
+
+ tA = _mm_loadl_pi(_mm_setzero_ps(),(__m64 *)xyz_shift); /* - - sy sx */
+ tB = _mm_load_ss(xyz_shift+2); /* - - - sz */
+
+ t1 = _mm_loadu_ps(xyz); /* 12 packed coordinates in three loads */
+ t2 = _mm_loadu_ps(xyz+4);
+ t3 = _mm_loadu_ps(xyz+8);
+
+ tA = _mm_movelh_ps(tA,tB); /* - sz sy sx */
+ t4 = _mm_permute_ps(tA,_MM_SHUFFLE(0,2,1,0)); /* shift rotated to match the packed xyzxyzxyzxyz layout */
+ t5 = _mm_permute_ps(tA,_MM_SHUFFLE(1,0,2,1));
+ t6 = _mm_permute_ps(tA,_MM_SHUFFLE(2,1,0,2));
+
+ t1 = _mm_add_ps(t1,t4);
+ t2 = _mm_add_ps(t2,t5);
+ t3 = _mm_add_ps(t3,t6);
+
+ t12 = _mm_permute_ps(t3,_MM_SHUFFLE(3,3,3,3)); /* broadcast each shifted coordinate */
+ t11 = _mm_permute_ps(t3,_MM_SHUFFLE(2,2,2,2));
+ t10 = _mm_permute_ps(t3,_MM_SHUFFLE(1,1,1,1));
+ t9 = _mm_permute_ps(t3,_MM_SHUFFLE(0,0,0,0));
+ t8 = _mm_permute_ps(t2,_MM_SHUFFLE(3,3,3,3));
+ t7 = _mm_permute_ps(t2,_MM_SHUFFLE(2,2,2,2));
+ t6 = _mm_permute_ps(t2,_MM_SHUFFLE(1,1,1,1));
+ t5 = _mm_permute_ps(t2,_MM_SHUFFLE(0,0,0,0));
+ t4 = _mm_permute_ps(t1,_MM_SHUFFLE(3,3,3,3));
+ t3 = _mm_permute_ps(t1,_MM_SHUFFLE(2,2,2,2));
+ t2 = _mm_permute_ps(t1,_MM_SHUFFLE(1,1,1,1));
+ t1 = _mm_permute_ps(t1,_MM_SHUFFLE(0,0,0,0));
+
+ *x1 = gmx_mm256_set_m128(t1,t1); /* duplicate into both 128-bit lanes */
+ *y1 = gmx_mm256_set_m128(t2,t2);
+ *z1 = gmx_mm256_set_m128(t3,t3);
+ *x2 = gmx_mm256_set_m128(t4,t4);
+ *y2 = gmx_mm256_set_m128(t5,t5);
+ *z2 = gmx_mm256_set_m128(t6,t6);
+ *x3 = gmx_mm256_set_m128(t7,t7);
+ *y3 = gmx_mm256_set_m128(t8,t8);
+ *z3 = gmx_mm256_set_m128(t9,t9);
+ *x4 = gmx_mm256_set_m128(t10,t10);
+ *y4 = gmx_mm256_set_m128(t11,t11);
+ *z4 = gmx_mm256_set_m128(t12,t12);
+}
+
+
+
+static gmx_inline void /* transpose one xyz triplet from four pointers into per-coordinate vectors (low lanes only) */
+gmx_mm256_load_1rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
+ const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
+ __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1)
+{
+ __m128 t1,t2,t3,t4;
+ __m128i mask = _mm_set_epi32(0,-1,-1,-1); /* load only 3 floats: avoids reading past the rvec */
+ t1 = _mm_maskload_ps(ptrA,mask);
+ t2 = _mm_maskload_ps(ptrB,mask);
+ t3 = _mm_maskload_ps(ptrC,mask);
+ t4 = _mm_maskload_ps(ptrD,mask);
+ _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
+ *x1 = _mm256_castps128_ps256(t1); /* upper lanes of the outputs are undefined */
+ *y1 = _mm256_castps128_ps256(t2);
+ *z1 = _mm256_castps128_ps256(t3);
+}
+
+
+static gmx_inline void /* transpose three xyz triplets (9 floats) from four pointers into per-coordinate vectors (low lanes only) */
+gmx_mm256_load_3rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
+ const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
+ __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
+ __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
+ __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3)
+{
+ __m128 t1,t2,t3,t4;
+ t1 = _mm_loadu_ps(ptrA); /* x1 y1 z1 x2 for particle A, etc. */
+ t2 = _mm_loadu_ps(ptrB);
+ t3 = _mm_loadu_ps(ptrC);
+ t4 = _mm_loadu_ps(ptrD);
+ _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
+ *x1 = _mm256_castps128_ps256(t1); /* upper lanes of all outputs are undefined */
+ *y1 = _mm256_castps128_ps256(t2);
+ *z1 = _mm256_castps128_ps256(t3);
+ *x2 = _mm256_castps128_ps256(t4);
+ t1 = _mm_loadu_ps(ptrA+4); /* y2 z2 x3 y3 */
+ t2 = _mm_loadu_ps(ptrB+4);
+ t3 = _mm_loadu_ps(ptrC+4);
+ t4 = _mm_loadu_ps(ptrD+4);
+ _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
+ *y2 = _mm256_castps128_ps256(t1);
+ *z2 = _mm256_castps128_ps256(t2);
+ *x3 = _mm256_castps128_ps256(t3);
+ *y3 = _mm256_castps128_ps256(t4);
+ t1 = _mm_load_ss(ptrA+8); /* ninth float (z3) loaded as a scalar to stay in bounds */
+ t2 = _mm_load_ss(ptrB+8);
+ t3 = _mm_load_ss(ptrC+8);
+ t4 = _mm_load_ss(ptrD+8);
+ t1 = _mm_unpacklo_ps(t1,t3); /* - - z3c z3a */
+ t3 = _mm_unpacklo_ps(t2,t4); /* - - z3d z3b */
+ *z3 = _mm256_castps128_ps256(_mm_unpacklo_ps(t1,t3)); /* - - - - | z3d z3c z3b z3a */
+}
+
+
+
+static gmx_inline void /* transpose four xyz triplets (12 floats) from four pointers into per-coordinate vectors (low lanes only) */
+gmx_mm256_load_4rvec_4ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
+ const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
+ __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
+ __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
+ __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3,
+ __m256 * gmx_restrict x4, __m256 * gmx_restrict y4, __m256 * gmx_restrict z4)
+{
+ __m128 t1,t2,t3,t4;
+ t1 = _mm_loadu_ps(ptrA); /* x1 y1 z1 x2 */
+ t2 = _mm_loadu_ps(ptrB);
+ t3 = _mm_loadu_ps(ptrC);
+ t4 = _mm_loadu_ps(ptrD);
+ _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
+ *x1 = _mm256_castps128_ps256(t1); /* upper lanes of all outputs are undefined */
+ *y1 = _mm256_castps128_ps256(t2);
+ *z1 = _mm256_castps128_ps256(t3);
+ *x2 = _mm256_castps128_ps256(t4);
+ t1 = _mm_loadu_ps(ptrA+4); /* y2 z2 x3 y3 */
+ t2 = _mm_loadu_ps(ptrB+4);
+ t3 = _mm_loadu_ps(ptrC+4);
+ t4 = _mm_loadu_ps(ptrD+4);
+ _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
+ *y2 = _mm256_castps128_ps256(t1);
+ *z2 = _mm256_castps128_ps256(t2);
+ *x3 = _mm256_castps128_ps256(t3);
+ *y3 = _mm256_castps128_ps256(t4);
+ t1 = _mm_loadu_ps(ptrA+8); /* z3 x4 y4 z4 */
+ t2 = _mm_loadu_ps(ptrB+8);
+ t3 = _mm_loadu_ps(ptrC+8);
+ t4 = _mm_loadu_ps(ptrD+8);
+ _MM_TRANSPOSE4_PS(t1,t2,t3,t4);
+ *z3 = _mm256_castps128_ps256(t1);
+ *x4 = _mm256_castps128_ps256(t2);
+ *y4 = _mm256_castps128_ps256(t3);
+ *z4 = _mm256_castps128_ps256(t4);
+}
+
+
+static gmx_inline void /* transpose one xyz triplet from eight pointers into full-width per-coordinate vectors */
+gmx_mm256_load_1rvec_8ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
+ const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
+ const float * gmx_restrict ptrE, const float * gmx_restrict ptrF,
+ const float * gmx_restrict ptrG, const float * gmx_restrict ptrH,
+ __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1)
+{
+ __m256 t1,t2,t3,t4,t5,t6,t7,t8;
+ __m128i mask = _mm_set_epi32(0,-1,-1,-1); /* load only 3 floats per pointer */
+
+ t1 = gmx_mm256_set_m128(_mm_maskload_ps(ptrE,mask),_mm_maskload_ps(ptrA,mask)); /* - zE yE xE | - zA yA xA */
+ t2 = gmx_mm256_set_m128(_mm_maskload_ps(ptrF,mask),_mm_maskload_ps(ptrB,mask)); /* - zF yF xF | - zB yB xB */
+ t3 = gmx_mm256_set_m128(_mm_maskload_ps(ptrG,mask),_mm_maskload_ps(ptrC,mask)); /* - zG yG xG | - zC yC xC */
+ t4 = gmx_mm256_set_m128(_mm_maskload_ps(ptrH,mask),_mm_maskload_ps(ptrD,mask)); /* - zH yH xH | - zD yD xD */
+
+ t5 = _mm256_unpacklo_ps(t1,t2); /* yF yE xF xE | yB yA xB xA */
+ t6 = _mm256_unpacklo_ps(t3,t4); /* yH yG xH xG | yD yC xD xC */
+ t7 = _mm256_unpackhi_ps(t1,t2); /* - - zF zE | - - zB zA */
+ t8 = _mm256_unpackhi_ps(t3,t4); /* - - zH zG | - - zD zC */
+
+ *x1 = _mm256_shuffle_ps(t5,t6,_MM_SHUFFLE(1,0,1,0));
+ *y1 = _mm256_shuffle_ps(t5,t6,_MM_SHUFFLE(3,2,3,2));
+ *z1 = _mm256_shuffle_ps(t7,t8,_MM_SHUFFLE(1,0,1,0));
+}
+
+
+static gmx_inline void /* transpose three xyz triplets (9 floats) from eight pointers into full-width per-coordinate vectors */
+gmx_mm256_load_3rvec_8ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
+ const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
+ const float * gmx_restrict ptrE, const float * gmx_restrict ptrF,
+ const float * gmx_restrict ptrG, const float * gmx_restrict ptrH,
+ __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
+ __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
+ __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3)
+{
+ __m256 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12;
+
+ t1 = _mm256_loadu_ps(ptrA); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
+ t2 = _mm256_loadu_ps(ptrB); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
+ t3 = _mm256_loadu_ps(ptrC); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
+ t4 = _mm256_loadu_ps(ptrD); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
+ t5 = _mm256_loadu_ps(ptrE); /* y3e x3e z2e y2e | x2e z1e y1e x1e */
+ t6 = _mm256_loadu_ps(ptrF); /* y3f x3f z2f y2f | x2f z1f y1f x1f */
+ t7 = _mm256_loadu_ps(ptrG); /* y3g x3g z2g y2g | x2g z1g y1g x1g */
+ t8 = _mm256_loadu_ps(ptrH); /* y3h x3h z2h y2h | x2h z1h y1h x1h */
+
+ t9 = _mm256_unpacklo_ps(t1,t2); /* z2b z2a y2b y2a | y1b y1a x1b x1a */
+ t10 = _mm256_unpackhi_ps(t1,t2); /* y3b y3a x3b x3a | x2b x2a z1b z1a */
+ t11 = _mm256_unpacklo_ps(t3,t4); /* z2d z2c y2d y2c | y1d y1c x1d x1c */
+ t12 = _mm256_unpackhi_ps(t3,t4); /* y3d y3c x3d x3c | x2d x2c z1d z1c */
+ t1 = _mm256_unpacklo_ps(t5,t6); /* z2f z2e y2f y2e | y1f y1e x1f x1e */
+ t2 = _mm256_unpackhi_ps(t5,t6); /* y3f y3e x3f x3e | x2f x2e z1f z1e */
+ t3 = _mm256_unpacklo_ps(t7,t8); /* z2h z2g y2h y2g | y1h y1g x1h x1g */
+ t4 = _mm256_unpackhi_ps(t7,t8); /* y3h y3g x3h x3g | x2h x2g z1h z1g */
+
+ t5 = _mm256_shuffle_ps(t9,t11,_MM_SHUFFLE(1,0,1,0)); /* y2d y2c y2b y2a | x1d x1c x1b x1a */
+ t6 = _mm256_shuffle_ps(t9,t11,_MM_SHUFFLE(3,2,3,2)); /* z2d z2c z2b z2a | y1d y1c y1b y1a */
+ t7 = _mm256_shuffle_ps(t10,t12,_MM_SHUFFLE(1,0,1,0)); /* x3d x3c x3b x3a | z1d z1c z1b z1a */
+ t8 = _mm256_shuffle_ps(t10,t12,_MM_SHUFFLE(3,2,3,2)); /* y3d y3c y3b y3a | x2d x2c x2b x2a */
+
+ t9 = _mm256_shuffle_ps(t1,t3,_MM_SHUFFLE(1,0,1,0)); /* y2h y2g y2f y2e | x1h x1g x1f x1e */
+ t10 = _mm256_shuffle_ps(t1,t3,_MM_SHUFFLE(3,2,3,2)); /* z2h z2g z2f z2e | y1h y1g y1f y1e */
+ t11 = _mm256_shuffle_ps(t2,t4,_MM_SHUFFLE(1,0,1,0)); /* x3h x3g x3f x3e | z1h z1g z1f z1e */
+ t12 = _mm256_shuffle_ps(t2,t4,_MM_SHUFFLE(3,2,3,2)); /* y3h y3g y3f y3e | x2h x2g x2f x2e */
+
+ *x1 = _mm256_permute2f128_ps(t5, t9, 0x20); /* 0x20/0x31 gather the a-d and e-h lanes into final order */
+ *y1 = _mm256_permute2f128_ps(t6, t10, 0x20);
+ *z1 = _mm256_permute2f128_ps(t7, t11, 0x20);
+ *x2 = _mm256_permute2f128_ps(t8, t12, 0x20);
+
+ *y2 = _mm256_permute2f128_ps(t5, t9, 0x31);
+ *z2 = _mm256_permute2f128_ps(t6, t10, 0x31);
+ *x3 = _mm256_permute2f128_ps(t7, t11, 0x31);
+ *y3 = _mm256_permute2f128_ps(t8, t12, 0x31);
+
+ t1 = gmx_mm256_set_m128(_mm_load_ss(ptrE+8),_mm_load_ss(ptrA+8)); /* ninth float (z3) loaded as a scalar to stay in bounds */
+ t2 = gmx_mm256_set_m128(_mm_load_ss(ptrF+8),_mm_load_ss(ptrB+8));
+ t3 = gmx_mm256_set_m128(_mm_load_ss(ptrG+8),_mm_load_ss(ptrC+8));
+ t4 = gmx_mm256_set_m128(_mm_load_ss(ptrH+8),_mm_load_ss(ptrD+8));
+
+ t1 = _mm256_unpacklo_ps(t1,t3); /* - - z3g z3e | - - z3c z3a */
+ t2 = _mm256_unpacklo_ps(t2,t4); /* - - z3h z3f | - - z3d z3b */
+
+ *z3 = _mm256_unpacklo_ps(t1,t2); /* z3h z3g z3f z3e | z3d z3c z3b z3a */
+}
+
+
+
+static gmx_inline void /* transpose four xyz triplets (12 floats) from eight pointers into full-width per-coordinate vectors */
+gmx_mm256_load_4rvec_8ptr_swizzle_ps(const float * gmx_restrict ptrA, const float * gmx_restrict ptrB,
+ const float * gmx_restrict ptrC, const float * gmx_restrict ptrD,
+ const float * gmx_restrict ptrE, const float * gmx_restrict ptrF,
+ const float * gmx_restrict ptrG, const float * gmx_restrict ptrH,
+ __m256 * gmx_restrict x1, __m256 * gmx_restrict y1, __m256 * gmx_restrict z1,
+ __m256 * gmx_restrict x2, __m256 * gmx_restrict y2, __m256 * gmx_restrict z2,
+ __m256 * gmx_restrict x3, __m256 * gmx_restrict y3, __m256 * gmx_restrict z3,
+ __m256 * gmx_restrict x4, __m256 * gmx_restrict y4, __m256 * gmx_restrict z4)
+{
+ __m256 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12;
+
+ t1 = _mm256_loadu_ps(ptrA); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
+ t2 = _mm256_loadu_ps(ptrB); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
+ t3 = _mm256_loadu_ps(ptrC); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
+ t4 = _mm256_loadu_ps(ptrD); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
+ t5 = _mm256_loadu_ps(ptrE); /* y3e x3e z2e y2e | x2e z1e y1e x1e */
+ t6 = _mm256_loadu_ps(ptrF); /* y3f x3f z2f y2f | x2f z1f y1f x1f */
+ t7 = _mm256_loadu_ps(ptrG); /* y3g x3g z2g y2g | x2g z1g y1g x1g */
+ t8 = _mm256_loadu_ps(ptrH); /* y3h x3h z2h y2h | x2h z1h y1h x1h */
+
+ t9 = _mm256_unpacklo_ps(t1,t2); /* z2b z2a y2b y2a | y1b y1a x1b x1a */
+ t10 = _mm256_unpackhi_ps(t1,t2); /* y3b y3a x3b x3a | x2b x2a z1b z1a */
+ t11 = _mm256_unpacklo_ps(t3,t4); /* z2d z2c y2d y2c | y1d y1c x1d x1c */
+ t12 = _mm256_unpackhi_ps(t3,t4); /* y3d y3c x3d x3c | x2d x2c z1d z1c */
+ t1 = _mm256_unpacklo_ps(t5,t6); /* z2f z2e y2f y2e | y1f y1e x1f x1e */
+ t2 = _mm256_unpackhi_ps(t5,t6); /* y3f y3e x3f x3e | x2f x2e z1f z1e */
+ t3 = _mm256_unpacklo_ps(t7,t8); /* z2h z2g y2h y2g | y1h y1g x1h x1g */
+ t4 = _mm256_unpackhi_ps(t7,t8); /* y3h y3g x3h x3g | x2h x2g z1h z1g */
+
+ t5 = _mm256_shuffle_ps(t9,t11,_MM_SHUFFLE(1,0,1,0)); /* y2d y2c y2b y2a | x1d x1c x1b x1a */
+ t6 = _mm256_shuffle_ps(t9,t11,_MM_SHUFFLE(3,2,3,2)); /* z2d z2c z2b z2a | y1d y1c y1b y1a */
+ t7 = _mm256_shuffle_ps(t10,t12,_MM_SHUFFLE(1,0,1,0)); /* x3d x3c x3b x3a | z1d z1c z1b z1a */
+ t8 = _mm256_shuffle_ps(t10,t12,_MM_SHUFFLE(3,2,3,2)); /* y3d y3c y3b y3a | x2d x2c x2b x2a */
+ t9 = _mm256_shuffle_ps(t1,t3,_MM_SHUFFLE(1,0,1,0)); /* y2h y2g y2f y2e | x1h x1g x1f x1e */
+ t10 = _mm256_shuffle_ps(t1,t3,_MM_SHUFFLE(3,2,3,2)); /* z2h z2g z2f z2e | y1h y1g y1f y1e */
+ t11 = _mm256_shuffle_ps(t2,t4,_MM_SHUFFLE(1,0,1,0)); /* x3h x3g x3f x3e | z1h z1g z1f z1e */
+ t12 = _mm256_shuffle_ps(t2,t4,_MM_SHUFFLE(3,2,3,2)); /* y3h y3g y3f y3e | x2h x2g x2f x2e */
+
+ *x1 = _mm256_permute2f128_ps(t5, t9, 0x20); /* 0x20/0x31 gather the a-d and e-h lanes into final order */
+ *y1 = _mm256_permute2f128_ps(t6, t10, 0x20);
+ *z1 = _mm256_permute2f128_ps(t7, t11, 0x20);
+ *x2 = _mm256_permute2f128_ps(t8, t12, 0x20);
+
+ *y2 = _mm256_permute2f128_ps(t5, t9, 0x31);
+ *z2 = _mm256_permute2f128_ps(t6, t10, 0x31);
+ *x3 = _mm256_permute2f128_ps(t7, t11, 0x31);
+ *y3 = _mm256_permute2f128_ps(t8, t12, 0x31);
+
+ t1 = gmx_mm256_set_m128(_mm_loadu_ps(ptrE+8),_mm_loadu_ps(ptrA+8)); /* z4e y4e x4e z3e | z4a y4a x4a z3a */
+ t2 = gmx_mm256_set_m128(_mm_loadu_ps(ptrF+8),_mm_loadu_ps(ptrB+8)); /* z4f y4f x4f z3f | z4b y4b x4b z3b */
+ t3 = gmx_mm256_set_m128(_mm_loadu_ps(ptrG+8),_mm_loadu_ps(ptrC+8)); /* z4g y4g x4g z3g | z4c y4c x4c z3c */
+ t4 = gmx_mm256_set_m128(_mm_loadu_ps(ptrH+8),_mm_loadu_ps(ptrD+8)); /* z4h y4h x4h z3h | z4d y4d x4d z3d */
+
+ t5 = _mm256_unpacklo_ps(t1,t2); /* x4f x4e z3f z3e | x4b x4a z3b z3a */
+ t6 = _mm256_unpackhi_ps(t1,t2); /* z4f z4e y4f y4e | z4b z4a y4b y4a */
+ t7 = _mm256_unpacklo_ps(t3,t4); /* x4h x4g z3h z3g | x4d x4c z3d z3c */
+ t8 = _mm256_unpackhi_ps(t3,t4); /* z4h z4g y4h y4g | z4d z4c y4d y4c */
+
+ *z3 = _mm256_shuffle_ps(t5,t7,_MM_SHUFFLE(1,0,1,0)); /* z3h z3g z3f z3e | z3d z3c z3b z3a */
+ *x4 = _mm256_shuffle_ps(t5,t7,_MM_SHUFFLE(3,2,3,2)); /* x4h x4g x4f x4e | x4d x4c x4b x4a */
+ *y4 = _mm256_shuffle_ps(t6,t8,_MM_SHUFFLE(1,0,1,0)); /* y4h y4g y4f y4e | y4d y4c y4b y4a */
+ *z4 = _mm256_shuffle_ps(t6,t8,_MM_SHUFFLE(3,2,3,2)); /* z4h z4g z4f z4e | z4d z4c z4b z4a */
+}
+
+
+static gmx_inline void /* subtract per-element force components (low lanes of x1/y1/z1) from the xyz triplets at four pointers */
+gmx_mm256_decrement_1rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
+ float * gmx_restrict ptrC,float * gmx_restrict ptrD,
+ __m256 x1, __m256 y1, __m256 z1)
+{
+ __m128 t1,t2,t3,t4,t5,t6,t7,t8;
+ __m128i mask;
+
+ /* Construct a mask without executing any data loads */
+ mask = _mm_blend_epi16(_mm_setzero_si128(),_mm_cmpeq_epi16(_mm_setzero_si128(),_mm_setzero_si128()),0x3F); /* 0x3F: first three 32-bit elements all-ones */
+
+ t3 = _mm_unpacklo_ps(_mm256_castps256_ps128(x1),_mm256_castps256_ps128(y1)); /* y1b x1b y1a x1a */
+ t4 = _mm_unpackhi_ps(_mm256_castps256_ps128(x1),_mm256_castps256_ps128(y1)); /* y1d x1d y1c x1c */
+
+ t1 = _mm_shuffle_ps(t3,_mm256_castps256_ps128(z1),_MM_SHUFFLE(0,0,1,0)); /* - z1a y1a x1a */
+ t2 = _mm_shuffle_ps(t3,_mm256_castps256_ps128(z1),_MM_SHUFFLE(0,1,3,2)); /* - z1b y1b x1b */
+ t3 = _mm_shuffle_ps(t4,_mm256_castps256_ps128(z1),_MM_SHUFFLE(0,2,1,0)); /* - z1c y1c x1c */
+ t4 = _mm_shuffle_ps(t4,_mm256_castps256_ps128(z1),_MM_SHUFFLE(0,3,3,2)); /* - z1d y1d x1d */
+
+ t5 = _mm_maskload_ps(ptrA,mask); /* masked loads/stores touch exactly 3 floats per pointer */
+ t6 = _mm_maskload_ps(ptrB,mask);
+ t7 = _mm_maskload_ps(ptrC,mask);
+ t8 = _mm_maskload_ps(ptrD,mask);
+
+ t5 = _mm_sub_ps(t5,t1);
+ t6 = _mm_sub_ps(t6,t2);
+ t7 = _mm_sub_ps(t7,t3);
+ t8 = _mm_sub_ps(t8,t4);
+
+ _mm_maskstore_ps(ptrA,mask,t5);
+ _mm_maskstore_ps(ptrB,mask,t6);
+ _mm_maskstore_ps(ptrC,mask,t7);
+ _mm_maskstore_ps(ptrD,mask,t8);
+}
+
+
+
+static gmx_inline void /* subtract per-element forces for three xyz triplets (9 floats) from each of four pointers */
+gmx_mm256_decrement_3rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
+ float * gmx_restrict ptrC, float * gmx_restrict ptrD,
+ __m256 x1, __m256 y1, __m256 z1,
+ __m256 x2, __m256 y2, __m256 z2,
+ __m256 x3, __m256 y3, __m256 z3)
+{
+ __m256 t1,t2,t3,t4,t5,t6;
+ __m128 tA,tB,tC,tD;
+
+ t1 = _mm256_loadu_ps(ptrA); /* first 8 floats of each rvec triple */
+ t2 = _mm256_loadu_ps(ptrB);
+ t3 = _mm256_loadu_ps(ptrC);
+ t4 = _mm256_loadu_ps(ptrD);
+ tA = _mm_load_ss(ptrA+8); /* ninth float (z3) as a scalar */
+ tB = _mm_load_ss(ptrB+8);
+ tC = _mm_load_ss(ptrC+8);
+ tD = _mm_load_ss(ptrD+8);
+
+ t5 = _mm256_unpacklo_ps(x1,y1); /* - - - - | y1b x1b y1a x1a */
+ x1 = _mm256_unpackhi_ps(x1,y1); /* - - - - | y1d x1d y1c x1c */
+ y1 = _mm256_unpacklo_ps(z1,x2); /* - - - - | x2b z1b x2a z1a */
+ z1 = _mm256_unpackhi_ps(z1,x2); /* - - - - | x2d z1d x2c z1c */
+
+ x2 = _mm256_unpacklo_ps(y2,z2); /* - - - - | z2b y2b z2a y2a */
+ y2 = _mm256_unpackhi_ps(y2,z2); /* - - - - | z2d y2d z2c y2c */
+ t6 = _mm256_unpacklo_ps(x3,y3); /* - - - - | y3b x3b y3a x3a */
+ x3 = _mm256_unpackhi_ps(x3,y3); /* - - - - | y3d x3d y3c x3c */
+
+ t5 = _mm256_insertf128_ps(t5, _mm256_castps256_ps128(x2), 0x1); /* z2b y2b z2a y2a | y1b x1b y1a x1a */
+ x1 = _mm256_insertf128_ps(x1, _mm256_castps256_ps128(y2), 0x1); /* z2d y2d z2c y2c | y1d x1d y1c x1c */
+
+ y1 = _mm256_insertf128_ps(y1, _mm256_castps256_ps128(t6), 0x1); /* y3b x3b y3a x3a | x2b z1b x2a z1a */
+ z1 = _mm256_insertf128_ps(z1, _mm256_castps256_ps128(x3), 0x1); /* y3d x3d y3c x3c | x2d z1d x2c z1c */
+
+ z2 = _mm256_shuffle_ps(t5,y1,_MM_SHUFFLE(1,0,1,0)); /* y3a x3a z2a y2a | x2a z1a y1a x1a */
+ t5 = _mm256_shuffle_ps(t5,y1,_MM_SHUFFLE(3,2,3,2)); /* y3b x3b z2b y2b | x2b z1b y1b x1b */
+ y1 = _mm256_shuffle_ps(x1,z1,_MM_SHUFFLE(1,0,1,0)); /* y3c x3c z2c y2c | x2c z1c y1c x1c */
+ x1 = _mm256_shuffle_ps(x1,z1,_MM_SHUFFLE(3,2,3,2)); /* y3d x3d z2d y2d | x2d z1d y1d x1d */
+
+ t1 = _mm256_sub_ps(t1,z2); /* subtract the per-pointer force triplets */
+ t2 = _mm256_sub_ps(t2,t5);
+ t3 = _mm256_sub_ps(t3,y1);
+ t4 = _mm256_sub_ps(t4,x1);
+
+ tA = _mm_sub_ss(tA, _mm256_castps256_ps128(z3)); /* z3 handled element-by-element */
+ tB = _mm_sub_ss(tB, _mm_permute_ps(_mm256_castps256_ps128(z3),_MM_SHUFFLE(1,1,1,1)));
+ tC = _mm_sub_ss(tC, _mm_permute_ps(_mm256_castps256_ps128(z3),_MM_SHUFFLE(2,2,2,2)));
+ tD = _mm_sub_ss(tD, _mm_permute_ps(_mm256_castps256_ps128(z3),_MM_SHUFFLE(3,3,3,3)));
+
+ /* Here we store a full 256-bit value and a separate 32-bit one; no overlap can happen */
+ _mm256_storeu_ps(ptrA,t1);
+ _mm256_storeu_ps(ptrB,t2);
+ _mm256_storeu_ps(ptrC,t3);
+ _mm256_storeu_ps(ptrD,t4);
+ _mm_store_ss(ptrA+8,tA);
+ _mm_store_ss(ptrB+8,tB);
+ _mm_store_ss(ptrC+8,tC);
+ _mm_store_ss(ptrD+8,tD);
+}
+
+
/* Subtract the forces on four 4-site molecules (x/y/z for sites 1..4) from the
 * 12 consecutive floats stored at each of ptrA..ptrD.
 *
 * Each __m256 input holds the values for targets a..d in its lower 128-bit
 * lane (the upper lane is never referenced - see the lane diagrams below).
 * The unpack/insert/shuffle sequence transposes the per-site SIMD registers
 * into the per-pointer AoS layout before the subtraction. All loads/stores
 * are unaligned; the 256-bit store covers floats [0,8) and the 128-bit store
 * covers floats [8,12), so they cannot overlap.
 */
static gmx_inline void
gmx_mm256_decrement_4rvec_4ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                          float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                          __m256 x1, __m256 y1, __m256 z1,
                                          __m256 x2, __m256 y2, __m256 z2,
                                          __m256 x3, __m256 y3, __m256 z3,
                                          __m256 x4, __m256 y4, __m256 z4)
{
    __m256 t1,t2,t3,t4,t5;
    __m128 tA,tB,tC,tD,tE,tF,tG,tH;

    /* Load the current 12 floats (4 rvecs) per pointer: 8 + 4 */
    t1 = _mm256_loadu_ps(ptrA);
    t2 = _mm256_loadu_ps(ptrB);
    t3 = _mm256_loadu_ps(ptrC);
    t4 = _mm256_loadu_ps(ptrD);
    tA = _mm_loadu_ps(ptrA+8);
    tB = _mm_loadu_ps(ptrB+8);
    tC = _mm_loadu_ps(ptrC+8);
    tD = _mm_loadu_ps(ptrD+8);

    t5 = _mm256_unpacklo_ps(x1,y1);                                /* -    -    -    -   | y1b  x1b  y1a  x1a */
    x1 = _mm256_unpackhi_ps(x1,y1);                                /* -    -    -    -   | y1d  x1d  y1c  x1c */
    y1 = _mm256_unpacklo_ps(z1,x2);                                /* -    -    -    -   | x2b  z1b  x2a  z1a */
    z1 = _mm256_unpackhi_ps(z1,x2);                                /* -    -    -    -   | x2d  z1d  x2c  z1c */

    x2 = _mm256_unpacklo_ps(y2,z2);                                /* -    -    -    -   | z2b  y2b  z2a  y2a */
    y2 = _mm256_unpackhi_ps(y2,z2);                                /* -    -    -    -   | z2d  y2d  z2c  y2c */
    z2 = _mm256_unpacklo_ps(x3,y3);                                /* -    -    -    -   | y3b  x3b  y3a  x3a */
    x3 = _mm256_unpackhi_ps(x3,y3);                                /* -    -    -    -   | y3d  x3d  y3c  x3c */

    y3 = _mm256_unpacklo_ps(z3,x4);                                /* -    -    -    -   | x4b  z3b  x4a  z3a */
    z3 = _mm256_unpackhi_ps(z3,x4);                                /* -    -    -    -   | x4d  z3d  x4c  z3c */
    x4 = _mm256_unpacklo_ps(y4,z4);                                /* -    -    -    -   | z4b  y4b  z4a  y4a */
    y4 = _mm256_unpackhi_ps(y4,z4);                                /* -    -    -    -   | z4d  y4d  z4c  y4c */

    /* Combine the low lanes pairwise into full 256-bit registers */
    x2 = _mm256_insertf128_ps(t5, _mm256_castps256_ps128(x2), 0x1); /* z2b y2b z2a y2a | y1b x1b y1a x1a */
    x1 = _mm256_insertf128_ps(x1, _mm256_castps256_ps128(y2), 0x1); /* z2d y2d z2c y2c | y1d x1d y1c x1c */
    y1 = _mm256_insertf128_ps(y1, _mm256_castps256_ps128(z2), 0x1); /* y3b x3b y3a x3a | x2b z1b x2a z1a */
    z1 = _mm256_insertf128_ps(z1, _mm256_castps256_ps128(x3), 0x1); /* y3d x3d y3c x3c | x2d z1d x2c z1c */

    z2 = _mm256_shuffle_ps(x2,y1,_MM_SHUFFLE(1,0,1,0));             /* y3a x3a z2a y2a | x2a z1a y1a x1a */
    t5 = _mm256_shuffle_ps(x2,y1,_MM_SHUFFLE(3,2,3,2));             /* y3b x3b z2b y2b | x2b z1b y1b x1b */
    y1 = _mm256_shuffle_ps(x1,z1,_MM_SHUFFLE(1,0,1,0));             /* y3c x3c z2c y2c | x2c z1c y1c x1c */
    x1 = _mm256_shuffle_ps(x1,z1,_MM_SHUFFLE(3,2,3,2));             /* y3d x3d z2d y2d | x2d z1d y1d x1d */

    /* The trailing 4 floats (z3/x4/y4/z4) are assembled with 128-bit shuffles */
    tE = _mm_shuffle_ps(_mm256_castps256_ps128(y3),_mm256_castps256_ps128(x4),_MM_SHUFFLE(1,0,1,0)); /* z4a y4a x4a z3a */
    tF = _mm_shuffle_ps(_mm256_castps256_ps128(y3),_mm256_castps256_ps128(x4),_MM_SHUFFLE(3,2,3,2)); /* z4b y4b x4b z3b */

    tG = _mm_shuffle_ps(_mm256_castps256_ps128(z3),_mm256_castps256_ps128(y4),_MM_SHUFFLE(1,0,1,0)); /* z4c y4c x4c z3c */
    tH = _mm_shuffle_ps(_mm256_castps256_ps128(z3),_mm256_castps256_ps128(y4),_MM_SHUFFLE(3,2,3,2)); /* z4d y4d x4d z3d */

    t1 = _mm256_sub_ps(t1,z2);
    t2 = _mm256_sub_ps(t2,t5);
    t3 = _mm256_sub_ps(t3,y1);
    t4 = _mm256_sub_ps(t4,x1);

    tA = _mm_sub_ps(tA,tE);
    tB = _mm_sub_ps(tB,tF);
    tC = _mm_sub_ps(tC,tG);
    tD = _mm_sub_ps(tD,tH);

    /* Here we store a full 256-bit value and a separate 128-bit one; no overlap can happen */
    _mm256_storeu_ps(ptrA,t1);
    _mm256_storeu_ps(ptrB,t2);
    _mm256_storeu_ps(ptrC,t3);
    _mm256_storeu_ps(ptrD,t4);
    _mm_storeu_ps(ptrA+8,tA);
    _mm_storeu_ps(ptrB+8,tB);
    _mm_storeu_ps(ptrC+8,tC);
    _mm_storeu_ps(ptrD+8,tD);
}
+
+
+
/* Subtract one force triplet per target from eight rvec pointers at once.
 * x1/y1/z1 hold the values for targets a..h, one per AVX element.
 *
 * Only three floats (x,y,z) are read and written at each pointer: the mask
 * has its low three 32-bit elements set, so the masked load/store never
 * touches the fourth float past each rvec.
 */
static gmx_inline void
gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                          float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                          float * gmx_restrict ptrE, float * gmx_restrict ptrF,
                                          float * gmx_restrict ptrG, float * gmx_restrict ptrH,
                                          __m256 x1, __m256 y1, __m256 z1)
{
    __m256 t1,t2,t3,t4,t5,t6;
    __m256 tA,tB,tC,tD;
    __m128i mask;

    /* Construct a mask without executing any data loads */
    /* blend 0x3F selects the six low 16-bit words of all-ones, i.e. the
     * three low dwords become 0xFFFFFFFF and the top dword stays zero. */
    mask = _mm_blend_epi16(_mm_setzero_si128(),_mm_cmpeq_epi16(_mm_setzero_si128(),_mm_setzero_si128()),0x3F);

    tA = gmx_mm256_set_m128(_mm_maskload_ps(ptrE,mask),_mm_maskload_ps(ptrA,mask));
    tB = gmx_mm256_set_m128(_mm_maskload_ps(ptrF,mask),_mm_maskload_ps(ptrB,mask));
    tC = gmx_mm256_set_m128(_mm_maskload_ps(ptrG,mask),_mm_maskload_ps(ptrC,mask));
    tD = gmx_mm256_set_m128(_mm_maskload_ps(ptrH,mask),_mm_maskload_ps(ptrD,mask));
    t1 = _mm256_unpacklo_ps(x1,y1);                         /* y1f x1f y1e x1e | y1b x1b y1a x1a */
    t2 = _mm256_unpackhi_ps(x1,y1);                         /* y1h x1h y1g x1g | y1d x1d y1c x1c */

    t3 = _mm256_shuffle_ps(t1,z1,_MM_SHUFFLE(0,0,1,0));     /* -   z1e y1e x1e | -   z1a y1a x1a */
    t4 = _mm256_shuffle_ps(t1,z1,_MM_SHUFFLE(0,1,3,2));     /* -   z1f y1f x1f | -   z1b y1b x1b */
    t5 = _mm256_shuffle_ps(t2,z1,_MM_SHUFFLE(0,2,1,0));     /* -   z1g y1g x1g | -   z1c y1c x1c */
    t6 = _mm256_shuffle_ps(t2,z1,_MM_SHUFFLE(0,3,3,2));     /* -   z1h y1h x1h | -   z1d y1d x1d */

    tA = _mm256_sub_ps(tA,t3);
    tB = _mm256_sub_ps(tB,t4);
    tC = _mm256_sub_ps(tC,t5);
    tD = _mm256_sub_ps(tD,t6);

    /* Low lanes go back to ptrA..ptrD, high lanes to ptrE..ptrH */
    _mm_maskstore_ps(ptrA,mask,_mm256_castps256_ps128(tA));
    _mm_maskstore_ps(ptrB,mask,_mm256_castps256_ps128(tB));
    _mm_maskstore_ps(ptrC,mask,_mm256_castps256_ps128(tC));
    _mm_maskstore_ps(ptrD,mask,_mm256_castps256_ps128(tD));
    _mm_maskstore_ps(ptrE,mask,_mm256_extractf128_ps(tA,0x1));
    _mm_maskstore_ps(ptrF,mask,_mm256_extractf128_ps(tB,0x1));
    _mm_maskstore_ps(ptrG,mask,_mm256_extractf128_ps(tC,0x1));
    _mm_maskstore_ps(ptrH,mask,_mm256_extractf128_ps(tD,0x1));
}
+
+
+
/* Subtract the forces on eight 3-site molecules (x/y/z for sites 1..3, one
 * value per AVX element a..h) from the 9 consecutive floats at each of
 * ptrA..ptrH.
 *
 * The first 8 floats per pointer are updated via a full 8x8 transpose of the
 * site registers; the 9th float (z3) is handled separately with scalar
 * loads/stores so exactly 9 floats are touched at each pointer.
 */
static gmx_inline void
gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                          float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                          float * gmx_restrict ptrE, float * gmx_restrict ptrF,
                                          float * gmx_restrict ptrG, float * gmx_restrict ptrH,
                                          __m256 x1, __m256 y1, __m256 z1,
                                          __m256 x2, __m256 y2, __m256 z2,
                                          __m256 x3, __m256 y3, __m256 z3)
{
    __m256 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12;
    __m256 tA,tB,tC,tD,tE,tF,tG,tH;
    __m256 tI,tJ,tK,tL;

    tA = _mm256_loadu_ps(ptrA);
    tB = _mm256_loadu_ps(ptrB);
    tC = _mm256_loadu_ps(ptrC);
    tD = _mm256_loadu_ps(ptrD);
    tE = _mm256_loadu_ps(ptrE);
    tF = _mm256_loadu_ps(ptrF);
    tG = _mm256_loadu_ps(ptrG);
    tH = _mm256_loadu_ps(ptrH);

    t1 = _mm256_unpacklo_ps(x1,y1);                      /* y1f x1f y1e x1e | y1b x1b y1a x1a */
    t2 = _mm256_unpackhi_ps(x1,y1);                      /* y1h x1h y1g x1g | y1d x1d y1c x1c */
    t3 = _mm256_unpacklo_ps(z1,x2);                      /* x2f z1f x2e z1e | x2b z1b x2a z1a */
    t4 = _mm256_unpackhi_ps(z1,x2);                      /* x2h z1h x2g z1g | x2d z1d x2c z1c */

    t5 = _mm256_unpacklo_ps(y2,z2);                      /* z2f y2f z2e y2e | z2b y2b z2a y2a */
    t6 = _mm256_unpackhi_ps(y2,z2);                      /* z2h y2h z2g y2g | z2d y2d z2c y2c */
    t7 = _mm256_unpacklo_ps(x3,y3);                      /* y3f x3f y3e x3e | y3b x3b y3a x3a */
    t8 = _mm256_unpackhi_ps(x3,y3);                      /* y3h x3h y3g x3g | y3d x3d y3c x3c */

    t9  = _mm256_shuffle_ps(t1,t3,_MM_SHUFFLE(1,0,1,0)); /* x2e z1e y1e x1e | x2a z1a y1a x1a */
    t10 = _mm256_shuffle_ps(t1,t3,_MM_SHUFFLE(3,2,3,2)); /* x2f z1f y1f x1f | x2b z1b y1b x1b */
    t11 = _mm256_shuffle_ps(t2,t4,_MM_SHUFFLE(1,0,1,0)); /* x2g z1g y1g x1g | x2c z1c y1c x1c */
    t12 = _mm256_shuffle_ps(t2,t4,_MM_SHUFFLE(3,2,3,2)); /* x2h z1h y1h x1h | x2d z1d y1d x1d */

    t1 = _mm256_shuffle_ps(t5,t7,_MM_SHUFFLE(1,0,1,0));  /* y3e x3e z2e y2e | y3a x3a z2a y2a */
    t2 = _mm256_shuffle_ps(t5,t7,_MM_SHUFFLE(3,2,3,2));  /* y3f x3f z2f y2f | y3b x3b z2b y2b */
    t3 = _mm256_shuffle_ps(t6,t8,_MM_SHUFFLE(1,0,1,0));  /* y3g x3g z2g y2g | y3c x3c z2c y2c */
    t4 = _mm256_shuffle_ps(t6,t8,_MM_SHUFFLE(3,2,3,2));  /* y3h x3h z2h y2h | y3d x3d z2d y2d */

    t5  = gmx_mm256_unpack128lo_ps(t9,t1);               /* y3a x3a z2a y2a | x2a z1a y1a x1a */
    t6  = gmx_mm256_unpack128hi_ps(t9,t1);               /* y3e x3e z2e y2e | x2e z1e y1e x1e */
    t7  = gmx_mm256_unpack128lo_ps(t10,t2);              /* y3b x3b z2b y2b | x2b z1b y1b x1b */
    t8  = gmx_mm256_unpack128hi_ps(t10,t2);              /* y3f x3f z2f y2f | x2f z1f y1f x1f */
    t1  = gmx_mm256_unpack128lo_ps(t11,t3);              /* y3c x3c z2c y2c | x2c z1c y1c x1c */
    t2  = gmx_mm256_unpack128hi_ps(t11,t3);              /* y3g x3g z2g y2g | x2g z1g y1g x1g */
    t9  = gmx_mm256_unpack128lo_ps(t12,t4);              /* y3d x3d z2d y2d | x2d z1d y1d x1d */
    t10 = gmx_mm256_unpack128hi_ps(t12,t4);              /* y3h x3h z2h y2h | x2h z1h y1h x1h */

    tA = _mm256_sub_ps(tA,t5);
    tB = _mm256_sub_ps(tB,t7);
    tC = _mm256_sub_ps(tC,t1);
    tD = _mm256_sub_ps(tD,t9);
    tE = _mm256_sub_ps(tE,t6);
    tF = _mm256_sub_ps(tF,t8);
    tG = _mm256_sub_ps(tG,t2);
    tH = _mm256_sub_ps(tH,t10);

    _mm256_storeu_ps(ptrA,tA);
    _mm256_storeu_ps(ptrB,tB);
    _mm256_storeu_ps(ptrC,tC);
    _mm256_storeu_ps(ptrD,tD);
    _mm256_storeu_ps(ptrE,tE);
    _mm256_storeu_ps(ptrF,tF);
    _mm256_storeu_ps(ptrG,tG);
    _mm256_storeu_ps(ptrH,tH);

    /* Gather the eight ninth-floats (z3) with scalar loads ... */
    tI = gmx_mm256_set_m128(_mm_load_ss(ptrE+8),_mm_load_ss(ptrA+8));
    tJ = gmx_mm256_set_m128(_mm_load_ss(ptrF+8),_mm_load_ss(ptrB+8));
    tK = gmx_mm256_set_m128(_mm_load_ss(ptrG+8),_mm_load_ss(ptrC+8));
    tL = gmx_mm256_set_m128(_mm_load_ss(ptrH+8),_mm_load_ss(ptrD+8));

    tI = _mm256_unpacklo_ps(tI,tK);                      /* -  -  zG zE | -  -  zC zA */
    tJ = _mm256_unpacklo_ps(tJ,tL);                      /* -  -  zH zF | -  -  zD zB */
    tI = _mm256_unpacklo_ps(tI,tJ);                      /* zH zG zF zE | zD zC zB zA */

    /* ... decrement them all at once, broadcast each result to element 0
     * of its lane, and scatter back with scalar stores. */
    tI = _mm256_sub_ps(tI,z3);
    tJ = _mm256_permute_ps(tI,_MM_SHUFFLE(1,1,1,1));
    tK = _mm256_permute_ps(tI,_MM_SHUFFLE(2,2,2,2));
    tL = _mm256_permute_ps(tI,_MM_SHUFFLE(3,3,3,3));

    _mm_store_ss(ptrA+8,_mm256_castps256_ps128(tI));
    _mm_store_ss(ptrB+8,_mm256_castps256_ps128(tJ));
    _mm_store_ss(ptrC+8,_mm256_castps256_ps128(tK));
    _mm_store_ss(ptrD+8,_mm256_castps256_ps128(tL));
    _mm_store_ss(ptrE+8,_mm256_extractf128_ps(tI,0x1));
    _mm_store_ss(ptrF+8,_mm256_extractf128_ps(tJ,0x1));
    _mm_store_ss(ptrG+8,_mm256_extractf128_ps(tK,0x1));
    _mm_store_ss(ptrH+8,_mm256_extractf128_ps(tL,0x1));
}
+
+
/* Subtract the forces on eight 4-site molecules (x/y/z for sites 1..4, one
 * value per AVX element a..h) from the 12 consecutive floats at each of
 * ptrA..ptrH.
 *
 * Floats [0,8) per pointer are updated via a full 8x8 transpose of the
 * site-1/2/3 registers; floats [8,12) (z3,x4,y4,z4) are handled in a second
 * pass with 128-bit unaligned loads/stores.
 */
static gmx_inline void
gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(float * gmx_restrict ptrA, float * gmx_restrict ptrB,
                                          float * gmx_restrict ptrC, float * gmx_restrict ptrD,
                                          float * gmx_restrict ptrE, float * gmx_restrict ptrF,
                                          float * gmx_restrict ptrG, float * gmx_restrict ptrH,
                                          __m256 x1, __m256 y1, __m256 z1,
                                          __m256 x2, __m256 y2, __m256 z2,
                                          __m256 x3, __m256 y3, __m256 z3,
                                          __m256 x4, __m256 y4, __m256 z4)
{
    __m256 t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12;
    __m256 tA,tB,tC,tD,tE,tF,tG,tH;
    __m256 tI,tJ,tK,tL;

    tA = _mm256_loadu_ps(ptrA);
    tB = _mm256_loadu_ps(ptrB);
    tC = _mm256_loadu_ps(ptrC);
    tD = _mm256_loadu_ps(ptrD);
    tE = _mm256_loadu_ps(ptrE);
    tF = _mm256_loadu_ps(ptrF);
    tG = _mm256_loadu_ps(ptrG);
    tH = _mm256_loadu_ps(ptrH);

    t1 = _mm256_unpacklo_ps(x1,y1);                      /* y1f x1f y1e x1e | y1b x1b y1a x1a */
    t2 = _mm256_unpackhi_ps(x1,y1);                      /* y1h x1h y1g x1g | y1d x1d y1c x1c */
    t3 = _mm256_unpacklo_ps(z1,x2);                      /* x2f z1f x2e z1e | x2b z1b x2a z1a */
    t4 = _mm256_unpackhi_ps(z1,x2);                      /* x2h z1h x2g z1g | x2d z1d x2c z1c */

    t5 = _mm256_unpacklo_ps(y2,z2);                      /* z2f y2f z2e y2e | z2b y2b z2a y2a */
    t6 = _mm256_unpackhi_ps(y2,z2);                      /* z2h y2h z2g y2g | z2d y2d z2c y2c */
    t7 = _mm256_unpacklo_ps(x3,y3);                      /* y3f x3f y3e x3e | y3b x3b y3a x3a */
    t8 = _mm256_unpackhi_ps(x3,y3);                      /* y3h x3h y3g x3g | y3d x3d y3c x3c */

    t9  = _mm256_shuffle_ps(t1,t3,_MM_SHUFFLE(1,0,1,0)); /* x2e z1e y1e x1e | x2a z1a y1a x1a */
    t10 = _mm256_shuffle_ps(t1,t3,_MM_SHUFFLE(3,2,3,2)); /* x2f z1f y1f x1f | x2b z1b y1b x1b */
    t11 = _mm256_shuffle_ps(t2,t4,_MM_SHUFFLE(1,0,1,0)); /* x2g z1g y1g x1g | x2c z1c y1c x1c */
    t12 = _mm256_shuffle_ps(t2,t4,_MM_SHUFFLE(3,2,3,2)); /* x2h z1h y1h x1h | x2d z1d y1d x1d */

    t1 = _mm256_shuffle_ps(t5,t7,_MM_SHUFFLE(1,0,1,0));  /* y3e x3e z2e y2e | y3a x3a z2a y2a */
    t2 = _mm256_shuffle_ps(t5,t7,_MM_SHUFFLE(3,2,3,2));  /* y3f x3f z2f y2f | y3b x3b z2b y2b */
    t3 = _mm256_shuffle_ps(t6,t8,_MM_SHUFFLE(1,0,1,0));  /* y3g x3g z2g y2g | y3c x3c z2c y2c */
    t4 = _mm256_shuffle_ps(t6,t8,_MM_SHUFFLE(3,2,3,2));  /* y3h x3h z2h y2h | y3d x3d z2d y2d */

    t5  = gmx_mm256_unpack128lo_ps(t9,t1);               /* y3a x3a z2a y2a | x2a z1a y1a x1a */
    t6  = gmx_mm256_unpack128hi_ps(t9,t1);               /* y3e x3e z2e y2e | x2e z1e y1e x1e */
    t7  = gmx_mm256_unpack128lo_ps(t10,t2);              /* y3b x3b z2b y2b | x2b z1b y1b x1b */
    t8  = gmx_mm256_unpack128hi_ps(t10,t2);              /* y3f x3f z2f y2f | x2f z1f y1f x1f */
    t1  = gmx_mm256_unpack128lo_ps(t11,t3);              /* y3c x3c z2c y2c | x2c z1c y1c x1c */
    t2  = gmx_mm256_unpack128hi_ps(t11,t3);              /* y3g x3g z2g y2g | x2g z1g y1g x1g */
    t9  = gmx_mm256_unpack128lo_ps(t12,t4);              /* y3d x3d z2d y2d | x2d z1d y1d x1d */
    t10 = gmx_mm256_unpack128hi_ps(t12,t4);              /* y3h x3h z2h y2h | x2h z1h y1h x1h */

    tA = _mm256_sub_ps(tA,t5);
    tB = _mm256_sub_ps(tB,t7);
    tC = _mm256_sub_ps(tC,t1);
    tD = _mm256_sub_ps(tD,t9);
    tE = _mm256_sub_ps(tE,t6);
    tF = _mm256_sub_ps(tF,t8);
    tG = _mm256_sub_ps(tG,t2);
    tH = _mm256_sub_ps(tH,t10);

    _mm256_storeu_ps(ptrA,tA);
    _mm256_storeu_ps(ptrB,tB);
    _mm256_storeu_ps(ptrC,tC);
    _mm256_storeu_ps(ptrD,tD);
    _mm256_storeu_ps(ptrE,tE);
    _mm256_storeu_ps(ptrF,tF);
    _mm256_storeu_ps(ptrG,tG);
    _mm256_storeu_ps(ptrH,tH);

    /* Second pass: the trailing 4 floats per pointer (z3,x4,y4,z4) */
    tI = gmx_mm256_set_m128(_mm_loadu_ps(ptrE+8),_mm_loadu_ps(ptrA+8));
    tJ = gmx_mm256_set_m128(_mm_loadu_ps(ptrF+8),_mm_loadu_ps(ptrB+8));
    tK = gmx_mm256_set_m128(_mm_loadu_ps(ptrG+8),_mm_loadu_ps(ptrC+8));
    tL = gmx_mm256_set_m128(_mm_loadu_ps(ptrH+8),_mm_loadu_ps(ptrD+8));

    t1 = _mm256_unpacklo_ps(z3,x4);                      /* x4f z3f x4e z3e | x4b z3b x4a z3a */
    t2 = _mm256_unpackhi_ps(z3,x4);                      /* x4h z3h x4g z3g | x4d z3d x4c z3c */
    t3 = _mm256_unpacklo_ps(y4,z4);                      /* z4f y4f z4e y4e | z4b y4b z4a y4a */
    t4 = _mm256_unpackhi_ps(y4,z4);                      /* z4h y4h z4g y4g | z4d y4d z4c y4c */

    t5 = _mm256_shuffle_ps(t1,t3,_MM_SHUFFLE(1,0,1,0));  /* z4e y4e x4e z3e | z4a y4a x4a z3a */
    t6 = _mm256_shuffle_ps(t1,t3,_MM_SHUFFLE(3,2,3,2));  /* z4f y4f x4f z3f | z4b y4b x4b z3b */
    t7 = _mm256_shuffle_ps(t2,t4,_MM_SHUFFLE(1,0,1,0));  /* z4g y4g x4g z3g | z4c y4c x4c z3c */
    t8 = _mm256_shuffle_ps(t2,t4,_MM_SHUFFLE(3,2,3,2));  /* z4h y4h x4h z3h | z4d y4d x4d z3d */

    tI = _mm256_sub_ps(tI,t5);
    tJ = _mm256_sub_ps(tJ,t6);
    tK = _mm256_sub_ps(tK,t7);
    tL = _mm256_sub_ps(tL,t8);

    _mm_storeu_ps(ptrA+8,_mm256_castps256_ps128(tI));
    _mm_storeu_ps(ptrB+8,_mm256_castps256_ps128(tJ));
    _mm_storeu_ps(ptrC+8,_mm256_castps256_ps128(tK));
    _mm_storeu_ps(ptrD+8,_mm256_castps256_ps128(tL));
    _mm_storeu_ps(ptrE+8,_mm256_extractf128_ps(tI,0x1));
    _mm_storeu_ps(ptrF+8,_mm256_extractf128_ps(tJ,0x1));
    _mm_storeu_ps(ptrG+8,_mm256_extractf128_ps(tK,0x1));
    _mm_storeu_ps(ptrH+8,_mm256_extractf128_ps(tL,0x1));
}
+
+
+
/* Reduce the 8 per-target partial forces for a single i-atom to scalar
 * x/y/z sums, then accumulate those into fptr[0..2] and fshiftptr[0..2].
 *
 * load_ss + loadh_pi / store_ss + storeh_pi touch exactly three floats at
 * each destination; element 1 of t1 (a duplicate of the x sum, see the lane
 * comment below) is never stored.
 */
static gmx_inline void
gmx_mm256_update_iforce_1atom_swizzle_ps(__m256 fix1, __m256 fiy1, __m256 fiz1,
                                         float * gmx_restrict fptr,
                                         float * gmx_restrict fshiftptr)
{
    __m128 t1,t2,t3;

    fix1 = _mm256_hadd_ps(fix1,fix1);
    fiy1 = _mm256_hadd_ps(fiy1,fiz1);
    fix1 = _mm256_hadd_ps(fix1,fiy1); /* fiz1 fiy1 fix1 fix1 (in both lanes) */

    /* Add across the two lanes */
    t1 = _mm_add_ps(_mm256_castps256_ps128(fix1),_mm256_extractf128_ps(fix1,0x1));

    t2 = _mm_load_ss(fptr);
    t2 = _mm_loadh_pi(t2,(__m64 *)(fptr+1));
    t3 = _mm_load_ss(fshiftptr);
    t3 = _mm_loadh_pi(t3,(__m64 *)(fshiftptr+1));

    t2 = _mm_add_ps(t2,t1);
    t3 = _mm_add_ps(t3,t1);

    _mm_store_ss(fptr,t2);
    _mm_storeh_pi((__m64 *)(fptr+1),t2);
    _mm_store_ss(fshiftptr,t3);
    _mm_storeh_pi((__m64 *)(fshiftptr+1),t3);
}
+
/* Reduce the per-target partial forces for three i-atoms (e.g. a 3-site
 * water): a hadd tree collapses the eight AVX elements of each of the nine
 * inputs, the nine totals are added to fptr[0..8], and the net x/y/z summed
 * over the three atoms is accumulated into fshiftptr[0..2] (fshiftptr[3] is
 * read and written back unchanged - the 0x7 blend zeroes that element of the
 * increment).
 */
static gmx_inline void
gmx_mm256_update_iforce_3atom_swizzle_ps(__m256 fix1, __m256 fiy1, __m256 fiz1,
                                         __m256 fix2, __m256 fiy2, __m256 fiz2,
                                         __m256 fix3, __m256 fiy3, __m256 fiz3,
                                         float * gmx_restrict fptr,
                                         float * gmx_restrict fshiftptr)
{
    __m256 t1,t2,t3;
    __m128 tA,tB,tC;

    fix1 = _mm256_hadd_ps(fix1,fiy1);                /* Y1g+Y1h Y1e+Y1f X1g+X1h X1e+X1f | Y1c+Y1d Y1a+Y1b X1c+X1d X1a+X1b */
    fiz1 = _mm256_hadd_ps(fiz1,fix2);                /* X2g+X2h X2e+X2f Z1g+Z1h Z1e+Z1f | X2c+X2d X2a+X2b Z1c+Z1d Z1a+Z1b */
    fiy2 = _mm256_hadd_ps(fiy2,fiz2);                /* Z2g+Z2h Z2e+Z2f Y2g+Y2h Y2e+Y2f | Z2c+Z2d Z2a+Z2b Y2c+Y2d Y2a+Y2b */
    fix3 = _mm256_hadd_ps(fix3,fiy3);                /* Y3g+Y3h Y3e+Y3f X3g+X3h X3e+X3f | Y3c+Y3d Y3a+Y3b X3c+X3d X3a+X3b */
    fiz3 = _mm256_hadd_ps(fiz3,_mm256_setzero_ps()); /* 0       0       Z3g+Z3h Z3e+Z3f | 0       0       Z3c+Z3d Z3a+Z3b */

    fix1 = _mm256_hadd_ps(fix1,fiz1);                /* X2e-h   Z1e-h   Y1e-h   X1e-h   | X2a-d   Z1a-d   Y1a-d   X1a-d   */
    fiy2 = _mm256_hadd_ps(fiy2,fix3);                /* Y3e-h   X3e-h   Z2e-h   Y2e-h   | Y3a-d   X3a-d   Z2a-d   Y2a-d   */
    fiz3 = _mm256_hadd_ps(fiz3,_mm256_setzero_ps()); /* 0       0       0       Z3e-h   | 0       0       0       Z3a-d   */

    /* Add across the two lanes by swapping and adding back */
    t1 = gmx_mm256_unpack128lo_ps(fix1,fiy2);        /* Y3a-d X3a-d Z2a-d Y2a-d | X2a-d Z1a-d Y1a-d X1a-d */
    t2 = gmx_mm256_unpack128hi_ps(fix1,fiy2);        /* Y3e-h X3e-h Z2e-h Y2e-h | X2e-h Z1e-h Y1e-h X1e-h */
    t1 = _mm256_add_ps(t1,t2);                       /* y3 x3 z2 y2 | x2 z1 y1 x1 */

    tA = _mm_add_ps(_mm256_castps256_ps128(fiz3),_mm256_extractf128_ps(fiz3,0x1)); /* 0 0 0 z3 */

    /* Accumulate the nine totals into the force array */
    t3 = _mm256_loadu_ps(fptr);
    t3 = _mm256_add_ps(t3,t1);
    _mm256_storeu_ps(fptr,t3);
    tB = _mm_load_ss(fptr+8);
    tB = _mm_add_ss(tB,tA);
    _mm_store_ss(fptr+8,tB);

    /* Add up shift force */
    tB = _mm256_extractf128_ps(t1,0x1);                                   /* y3 x3 z2 y2 */
    tC = _mm_shuffle_ps(_mm256_castps256_ps128(t1),tB,_MM_SHUFFLE(1,0,3,3)); /* z2 y2 x2 x2 */
    tB = _mm_shuffle_ps(tB,tA,_MM_SHUFFLE(1,0,3,2));                      /* 0  z3 y3 x3 */
    tC = _mm_permute_ps(tC,_MM_SHUFFLE(3,3,2,0));                         /* -  z2 y2 x2 */

    tB = _mm_add_ps(tB,_mm256_castps256_ps128(t1));
    tA = _mm_add_ps(tB,tC);                                               /* -  z  y  x  */

    tA = _mm_blend_ps(_mm_setzero_ps(),tA,0x7);                           /* 0  z  y  x  */

    tC = _mm_loadu_ps(fshiftptr);
    tC = _mm_add_ps(tC,tA);
    _mm_storeu_ps(fshiftptr,tC);
}
+
+
/* Reduce the per-target partial forces for four i-atoms (e.g. a 4-site
 * water): a hadd tree collapses the eight AVX elements of each of the twelve
 * inputs, the twelve totals are added to fptr[0..11], and the net x/y/z
 * summed over all four atoms is accumulated into fshiftptr[0..2]
 * (fshiftptr[3] is read and written back unchanged via the 0x7 blend).
 */
static gmx_inline void
gmx_mm256_update_iforce_4atom_swizzle_ps(__m256 fix1, __m256 fiy1, __m256 fiz1,
                                         __m256 fix2, __m256 fiy2, __m256 fiz2,
                                         __m256 fix3, __m256 fiy3, __m256 fiz3,
                                         __m256 fix4, __m256 fiy4, __m256 fiz4,
                                         float * gmx_restrict fptr,
                                         float * gmx_restrict fshiftptr)
{
    __m256 t1,t2,t3;
    __m128 tA,tB,tC;

    fix1 = _mm256_hadd_ps(fix1,fiy1); /* Y1g+Y1h Y1e+Y1f X1g+X1h X1e+X1f | Y1c+Y1d Y1a+Y1b X1c+X1d X1a+X1b */
    fiz1 = _mm256_hadd_ps(fiz1,fix2); /* X2g+X2h X2e+X2f Z1g+Z1h Z1e+Z1f | X2c+X2d X2a+X2b Z1c+Z1d Z1a+Z1b */
    fiy2 = _mm256_hadd_ps(fiy2,fiz2); /* Z2g+Z2h Z2e+Z2f Y2g+Y2h Y2e+Y2f | Z2c+Z2d Z2a+Z2b Y2c+Y2d Y2a+Y2b */
    fix3 = _mm256_hadd_ps(fix3,fiy3); /* Y3g+Y3h Y3e+Y3f X3g+X3h X3e+X3f | Y3c+Y3d Y3a+Y3b X3c+X3d X3a+X3b */
    fiz3 = _mm256_hadd_ps(fiz3,fix4); /* X4g+X4h X4e+X4f Z3g+Z3h Z3e+Z3f | X4c+X4d X4a+X4b Z3c+Z3d Z3a+Z3b */
    fiy4 = _mm256_hadd_ps(fiy4,fiz4); /* Z4g+Z4h Z4e+Z4f Y4g+Y4h Y4e+Y4f | Z4c+Z4d Z4a+Z4b Y4c+Y4d Y4a+Y4b */

    fix1 = _mm256_hadd_ps(fix1,fiz1); /* X2e-h Z1e-h Y1e-h X1e-h | X2a-d Z1a-d Y1a-d X1a-d */
    fiy2 = _mm256_hadd_ps(fiy2,fix3); /* Y3e-h X3e-h Z2e-h Y2e-h | Y3a-d X3a-d Z2a-d Y2a-d */
    fiz3 = _mm256_hadd_ps(fiz3,fiy4); /* Z4e-h Y4e-h X4e-h Z3e-h | Z4a-d Y4a-d X4a-d Z3a-d */

    /* Add across the two lanes by swapping and adding back */
    t1 = gmx_mm256_unpack128lo_ps(fix1,fiy2); /* Y3a-d X3a-d Z2a-d Y2a-d | X2a-d Z1a-d Y1a-d X1a-d */
    t2 = gmx_mm256_unpack128hi_ps(fix1,fiy2); /* Y3e-h X3e-h Z2e-h Y2e-h | X2e-h Z1e-h Y1e-h X1e-h */
    t1 = _mm256_add_ps(t1,t2);                /* y3 x3 z2 y2 | x2 z1 y1 x1 */

    tA = _mm_add_ps(_mm256_castps256_ps128(fiz3),_mm256_extractf128_ps(fiz3,0x1)); /* z4 y4 x4 z3 */

    /* Accumulate the twelve totals into the force array */
    t3 = _mm256_loadu_ps(fptr);
    t3 = _mm256_add_ps(t3,t1);
    _mm256_storeu_ps(fptr,t3);

    tB = _mm_loadu_ps(fptr+8);
    tB = _mm_add_ps(tB,tA);
    _mm_storeu_ps(fptr+8,tB);

    /* Add up shift force */
    tB = _mm256_extractf128_ps(t1,0x1);                                   /* y3 x3 z2 y2 */
    tC = _mm_shuffle_ps(_mm256_castps256_ps128(t1),tB,_MM_SHUFFLE(1,0,3,3)); /* z2 y2 x2 x2 */
    tB = _mm_shuffle_ps(tB,tA,_MM_SHUFFLE(1,0,3,2));                      /* 0  z3 y3 x3 */
    tC = _mm_permute_ps(tC,_MM_SHUFFLE(3,3,2,0));                         /* -  z2 y2 x2 */
    tA = _mm_permute_ps(tA,_MM_SHUFFLE(0,3,2,1));                         /* -  z4 y4 x4 */

    tB = _mm_add_ps(tB,_mm256_castps256_ps128(t1));
    tA = _mm_add_ps(tA,tC);
    tA = _mm_add_ps(tA,tB);

    tA = _mm_blend_ps(_mm_setzero_ps(),tA,0x7); /* 0 z y x */

    tC = _mm_loadu_ps(fshiftptr);
    tC = _mm_add_ps(tC,tA);
    _mm_storeu_ps(fshiftptr,tC);
}
+
+
+
+static gmx_inline void
+gmx_mm256_update_1pot_ps(__m256 pot1, float * gmx_restrict ptrA)
+{
+ __m128 t1;
+
+ pot1 = _mm256_hadd_ps(pot1,pot1);
+ pot1 = _mm256_hadd_ps(pot1,pot1);
+
+ t1 = _mm_add_ps(_mm256_castps256_ps128(pot1),_mm256_extractf128_ps(pot1,0x1));
+
+ _mm_store_ss(ptrA,_mm_add_ss(_mm_load_ss(ptrA),t1));
+}
+
+static gmx_inline void
+gmx_mm256_update_2pot_ps(__m256 pot1, float * gmx_restrict ptrA,
+ __m256 pot2, float * gmx_restrict ptrB)
+{
+ __m128 t1,t2;
+
+ pot1 = _mm256_hadd_ps(pot1,pot2);
+ pot1 = _mm256_hadd_ps(pot1,pot1);
+
+ t1 = _mm_add_ps(_mm256_castps256_ps128(pot1),_mm256_extractf128_ps(pot1,0x1));
+
+ t2 = _mm_permute_ps(t1,_MM_SHUFFLE(1,1,1,1));
+ _mm_store_ss(ptrA,_mm_add_ss(_mm_load_ss(ptrA),t1));
+ _mm_store_ss(ptrB,_mm_add_ss(_mm_load_ss(ptrB),t2));
+}
+
+
+static gmx_inline void
+gmx_mm256_update_4pot_ps(__m256 pot1, float * gmx_restrict ptrA,
+ __m256 pot2, float * gmx_restrict ptrB,
+ __m256 pot3, float * gmx_restrict ptrC,
+ __m256 pot4, float * gmx_restrict ptrD)
+{
+ __m128 t1,t2,t3,t4;
+
+ pot1 = _mm256_hadd_ps(pot1,pot2);
+ pot3 = _mm256_hadd_ps(pot3,pot4);
+ pot1 = _mm256_hadd_ps(pot1,pot3);
+ t1 = _mm_add_ps(_mm256_castps256_ps128(pot1),_mm256_extractf128_ps(pot1,0x1));
+ t2 = _mm_permute_ps(t1,_MM_SHUFFLE(1,1,1,1));
+ t3 = _mm_permute_ps(t1,_MM_SHUFFLE(2,2,2,2));
+ t4 = _mm_permute_ps(t1,_MM_SHUFFLE(3,3,3,3));
+ _mm_store_ss(ptrA,_mm_add_ss(_mm_load_ss(ptrA),t1));
+ _mm_store_ss(ptrB,_mm_add_ss(_mm_load_ss(ptrB),t2));
+ _mm_store_ss(ptrC,_mm_add_ss(_mm_load_ss(ptrC),t3));
+ _mm_store_ss(ptrD,_mm_add_ss(_mm_load_ss(ptrD),t4));
+}
+
+
+#endif /* _kernelutil_x86_avx_256_single_h_ */
--- /dev/null
+#!/usr/bin/python
+
+import sys
+import os
+sys.path.append ( "../preprocessor" )
+from gmxpreprocess import gmxpreprocess
+
+# "The happiest programs are programs that write other programs."
+#
+#
+# This script controls the generation of Gromacs nonbonded kernels.
+#
+# We no longer generate kernels on-the-fly, so this file is not run
+# during a Gromacs compile - only when we need to update the kernels (=rarely).
+#
+# To maximize performance, each combination of interactions in Gromacs
+# has a separate nonbonded kernel without conditionals in the code.
+# To avoid writing hundreds of different routines for each architecture,
+# we instead use a custom preprocessor so we can encode the conditionals
# and expand for-loops (e.g., for water-water interactions)
+# from a general kernel template. While that file will contain quite a
+# few preprocessor directives, it is still an order of magnitude easier
+# to maintain than ~200 different kernels (not to mention it avoids bugs).
+#
+# To actually generate the kernels, this program iteratively calls the
+# preprocessor with different define settings corresponding to all
+# combinations of coulomb/van-der-Waals/geometry options.
+#
+# A main goal in the design was to make this new generator _general_. For
+# this reason we have used a lot of different fields to identify a particular
+# kernel and interaction. Basically, each kernel will have a name like
+#
+# nbkernel_ElecXX_VdwYY_GeomZZ_VF_QQ()
+#
+# Where XX/YY/ZZ/VF are strings to identify what the kernel computes.
+#
+# Elec/Vdw describe the type of interaction for electrostatics and van der Waals.
+# The geometry settings correspond e.g. to water-water or water-particle kernels,
+# and finally the VF setting is V,F,or VF depending on whether we calculate
+# only the potential, only the force, or both of them. The final string (QQ)
+# is the architecture/language/optimization of the kernel.
+#
# Architecture/language/optimization tag appended to every generated kernel
# and file name (the 'QQ' part described above).
Arch = 'avx_256_single'

# Explanation of the 'properties':
#
# It is cheap to compute r^2, and the kernels require various other functions of r for
# different kinds of interaction. Depending on the needs of the kernel and the available
# processor instructions, this will be done in different ways.
#
# 'rinv' means we need 1/r, which is calculated as 1/sqrt(r^2).
# 'rinvsq' means we need 1/(r*r). This is calculated as rinv*rinv if we already did rinv, otherwise 1/r^2.
# 'r' is similarly calculated as r^2*rinv when needed
# 'table' means the interaction is tabulated, in which case we will calculate a table index before the interaction
# 'shift' means the interaction will be modified by a constant to make it zero at the cutoff.
# 'cutoff' means the interaction is set to 0.0 outside the cutoff
#

# License/copyright banner written verbatim at the top of every generated file.
FileHeader = \
'/*\n' \
' * Note: this file was generated by the Gromacs '+Arch+' kernel generator.\n' \
' *\n' \
' * This source code is part of\n' \
' *\n' \
' * G R O M A C S\n' \
' *\n' \
' * Copyright (c) 2001-2012, The GROMACS Development Team\n' \
' *\n' \
' * Gromacs is a library for molecular simulation and trajectory analysis,\n' \
' * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for\n' \
' * a full list of developers and information, check out http://www.gromacs.org\n' \
' *\n' \
' * This program is free software; you can redistribute it and/or modify it under\n' \
' * the terms of the GNU Lesser General Public License as published by the Free\n' \
' * Software Foundation; either version 2 of the License, or (at your option) any\n' \
' * later version.\n' \
' *\n' \
' * To help fund GROMACS development, we humbly ask that you cite\n' \
' * the papers people have written on it - you can find them on the website.\n' \
' */\n'
+
###############################################
# ELECTROSTATICS
# Interactions and flags for them
###############################################
# Maps each electrostatics form to the distance-derived quantities
# ('rinv', 'r', 'table', ...) the kernel must compute for it - see the
# 'properties' explanation above.
ElectrostaticsList = {
    'None' : [],
    'Coulomb' : ['rinv','rinvsq'],
    'ReactionField' : ['rinv','rinvsq'],
    'GeneralizedBorn' : ['rinv','r'],
    'CubicSplineTable' : ['rinv','r','table'],
    'Ewald' : ['rinv','rinvsq','r'],
}


###############################################
# VAN DER WAALS
# Interactions and flags for them
###############################################
# Same structure as ElectrostaticsList, for the van der Waals forms.
VdwList = {
    'None' : [],
    'LennardJones' : ['rinvsq'],
# 'Buckingham' : ['rinv','rinvsq','r'], # Disabled for AVX_256 to reduce number of kernels and simplify the template
    'CubicSplineTable' : ['rinv','r','table'],
}


###############################################
# MODIFIERS
# Different ways to adjust/modify interactions to conserve energy
###############################################
ModifierList = {
    'None' : [],
    'ExactCutoff' : ['exactcutoff'], # Zero the interaction outside the cutoff, used for reaction-field-zero
    'PotentialShift' : ['shift','exactcutoff'],
    'PotentialSwitch' : ['rinv','r','switch','exactcutoff']
}


###############################################
# GEOMETRY COMBINATIONS
###############################################
# Each entry is an [i-group, j-group] pair for which kernels are generated.
GeometryNameList = [
    [ 'Particle' , 'Particle' ],
    [ 'Water3' , 'Particle' ],
    [ 'Water3' , 'Water3' ],
    [ 'Water4' , 'Particle' ],
    [ 'Water4' , 'Water4' ]
]
+
+
###############################################
# POTENTIAL / FORCE
###############################################
VFList = [
    'PotentialAndForce',
# 'Potential', # Not used yet
    'Force'
]


###############################################
# GEOMETRY PROPERTIES
###############################################
# Dictionaries with lists telling which interactions are present
# 1,2,3 means particles 1,2,3 (but not 0) have electrostatics!
# (e.g. for Water4/TIP4P-style geometries the first site carries no charge)
GeometryElectrostatics = {
    'Particle' : [ 0 ],
    'Particle2' : [ 0 , 1 ],
    'Particle3' : [ 0 , 1 , 2 ],
    'Particle4' : [ 0 , 1 , 2 , 3 ],
    'Water3' : [ 0 , 1 , 2 ],
    'Water4' : [ 1 , 2 , 3 ]
}

# Which sites of each geometry carry van der Waals interactions
# (for the water models only the oxygen, site 0, does).
GeometryVdw = {
    'Particle' : [ 0 ],
    'Particle2' : [ 0 , 1 ],
    'Particle3' : [ 0 , 1 , 2 ],
    'Particle4' : [ 0 , 1 , 2 , 3 ],
    'Water3' : [ 0 ],
    'Water4' : [ 0 ]
}




# Dictionary to abbreviate all strings (mixed from all the lists)
Abbreviation = {
    'None' : 'None',
    'Coulomb' : 'Coul',
    'Ewald' : 'Ew',
    'ReactionField' : 'RF',
    'GeneralizedBorn' : 'GB',
    'CubicSplineTable' : 'CSTab',
    'LennardJones' : 'LJ',
    'Buckingham' : 'Bham',
    'PotentialShift' : 'Sh',
    'PotentialSwitch' : 'Sw',
    'ExactCutoff' : 'Cut',
    'PotentialAndForce' : 'VF',
    'Potential' : 'V',
    'Force' : 'F',
    'Water3' : 'W3',
    'Water4' : 'W4',
    'Particle' : 'P1',
    'Particle2' : 'P2',
    'Particle3' : 'P3',
    'Particle4' : 'P4'
}
+
+
+###############################################
+# Functions
+###############################################
+
# Return the base file name for the kernel implied by the current settings
# (like the kernel function name, but without the V/F flavor suffix).
def MakeKernelFileName(KernelElec,KernelElecMod,KernelVdw,KernelVdwMod,KernelGeom):
    elec = 'Elec' + Abbreviation[KernelElec]
    if(KernelElecMod!='None'):
        elec += Abbreviation[KernelElecMod]
    vdw = 'Vdw' + Abbreviation[KernelVdw]
    if(KernelVdwMod!='None'):
        vdw += Abbreviation[KernelVdwMod]
    geom = 'Geom' + Abbreviation[KernelGeom[0]] + Abbreviation[KernelGeom[1]]
    return '_'.join(['nb_kernel',elec,vdw,geom,Arch])
+
def MakeKernelName(KernelElec,KernelElecMod,KernelVdw,KernelVdwMod,KernelGeom,KernelVF):
    # Full kernel function name: like the file name, but with the
    # potential/force flavor inserted before the architecture suffix.
    elec = 'Elec' + Abbreviation[KernelElec]
    if(KernelElecMod!='None'):
        elec += Abbreviation[KernelElecMod]
    vdw = 'Vdw' + Abbreviation[KernelVdw]
    if(KernelVdwMod!='None'):
        vdw += Abbreviation[KernelVdwMod]
    geom = 'Geom' + Abbreviation[KernelGeom[0]] + Abbreviation[KernelGeom[1]]
    return '_'.join(['nb_kernel',elec,vdw,geom,Abbreviation[KernelVF],Arch])
+
# Return a string with a declaration to use for the kernel;
# this will be a sequence of string combinations as well as the actual function name.
# Dont worry about field widths - that is just pretty-printing for the header!
def MakeKernelDecl(KernelName,KernelElec,KernelElecMod,KernelVdw,KernelVdwMod,KernelGeom,KernelOther,KernelVF):
    # Quote every descriptive field; the first entry after the function
    # pointer is the kernel's own name as a string.
    fields = [ KernelName, Arch, KernelElec, KernelElecMod,
               KernelVdw, KernelVdwMod, KernelGeom[0]+KernelGeom[1],
               KernelOther, KernelVF ]
    quoted = ['"%s"' % f for f in fields]
    return ' { ' + KernelName + ', ' + ', '.join(quoted) + ' }'
+
+
# Returns 1 if this kernel should be created, 0 if we should skip it.
# This routine is not critical - it is not the end of the world if we create more kernels,
# but since the number is pretty large we save both space and compile-time by reducing it a bit.
def KeepKernel(KernelElec,KernelElecMod,KernelVdw,KernelVdwMod,KernelGeom,KernelVF):

    have_elec = (KernelElec!='None')
    have_vdw  = (KernelVdw!='None')

    # A kernel that computes nothing is useless
    if(not have_elec and not have_vdw):
        return 0

    # A modifier is meaningless without the interaction it modifies
    if((not have_elec and KernelElecMod!='None') or (not have_vdw and KernelVdwMod!='None')):
        return 0

    # Water-optimized kernels only pay off with plain (non-GB) electrostatics
    if('Water' in KernelGeom[0]):
        if(not have_elec or 'GeneralizedBorn' in KernelElec):
            return 0

    # Two different table types in one kernel make no sense
    if(('Table' in KernelElec) and ('Table' in KernelVdw) and KernelElec!=KernelVdw):
        return 0

    # Keep the modifier combinations down: for electrostatics, exact cutoff
    # is only used with reaction-field, and shift/switch only with Ewald.
    if(KernelElecMod=='ExactCutoff' and KernelElec!='ReactionField'):
        return 0
    if(KernelElecMod in ['PotentialShift','PotentialSwitch'] and KernelElec!='Ewald'):
        return 0

    # For Vdw there is no plain exact-cutoff modifier, and shift/switch is
    # only supported for Lennard-Jones/Buckingham.
    if(KernelVdwMod=='ExactCutoff'):
        return 0
    if(KernelVdwMod in ['PotentialShift','PotentialSwitch'] and KernelVdw not in ['LennardJones','Buckingham']):
        return 0

    # Choose either switch or shift and don't mix them...
    if(set([KernelElecMod,KernelVdwMod])==set(['PotentialShift','PotentialSwitch'])):
        return 0

    # Don't modify Vdw when the electrostatics interaction is unmodified
    if(have_elec and KernelElecMod=='None' and KernelVdwMod!='None'):
        return 0

    # ...and vice versa, except for reaction-field electrostatics combined
    # with tabulated Vdw.
    if(have_vdw and KernelVdwMod=='None' and KernelElecMod!='None'):
        if(KernelElec!='ReactionField'):
            return 0
        elif(KernelVdw!='CubicSplineTable'):
            return 0

    return 1
+
+
+
+#
+# The preprocessor will automatically expand the interactions for water and other
+# geometries inside the kernel, but to get this right we need to setup a couple
+# of defines - we do them in a separate routine to keep the main loop clean.
+#
+# While this routine might look a bit complex it is actually quite straightforward,
+# and the best news is that you wont have to modify _anything_ for a new geometry
+# as long as you correctly define its Electrostatics/Vdw geometry in the lists above!
+#
def SetDefines(KernelElec,KernelElecMod,KernelVdw,KernelVdwMod,KernelGeom,KernelVF,defines):
    # Translate one kernel's overall settings into the per-particle and per-pair
    # preprocessor defines that the template expansion relies on. The geometry
    # tables tell us which sites in the i/j groups carry charge and/or Vdw terms.
    igeom = KernelGeom[0]
    jgeom = KernelGeom[1]
    # Export the geometry names so the preprocessor can see them in the template
    defines['GEOMETRY_I'] = igeom
    defines['GEOMETRY_J'] = jgeom

    # Per-site electrostatics flags, e.g. SPC/TIP3p -> [1,1,1], while TIP4p
    # (no charge on the first site) -> [0,1,1,1]
    ielec = GeometryElectrostatics[igeom]
    jelec = GeometryElectrostatics[jgeom]
    if(KernelElec=='None'):
        # Kernel does no electrostatics at all - no charged sites
        ielec = []
        jelec = []

    # Per-site Vdw flags (SPC example: [1,0,0])
    ivdw = GeometryVdw[igeom]
    jvdw = GeometryVdw[jgeom]
    if(KernelVdw=='None'):
        # Kernel does no Vdw at all
        ivdw = []
        jvdw = []

    # Sites participating in at least one interaction type; the set() round-trip
    # removes duplicates between the electrostatics and Vdw lists.
    iany = list(set(ielec+ivdw))
    jany = list(set(jelec+jvdw))

    defines['PARTICLES_ELEC_I'] = ielec
    defines['PARTICLES_ELEC_J'] = jelec
    defines['PARTICLES_VDW_I']  = ivdw
    defines['PARTICLES_VDW_J']  = jvdw
    defines['PARTICLES_I']      = iany
    defines['PARTICLES_J']      = jany

    # Pairs [i,j] of sites for which each interaction type is evaluated,
    # and the union of the two (anyij keeps the iany/jany iteration order).
    elecij = [ [i,j] for i in ielec for j in jelec ]
    vdwij  = [ [i,j] for i in ivdw  for j in jvdw  ]
    anyij  = [ [i,j] for i in iany for j in jany
               if [i,j] in elecij or [i,j] in vdwij ]

    defines['PAIRS_IJ'] = anyij

    # 2d table: for each (i,j) site pair, the list of distance-related
    # quantities the kernel must compute for that pair.
    ni = max(iany)+1
    nj = max(jany)+1
    # Start every entry out as an (independent) empty list
    flags = [ [ [] for j in range(0,nj) ] for i in range (0,ni) ]
    for i in range(0,ni):
        for j in range(0,nj):
            if [i,j] in elecij:
                flags[i][j] = flags[i][j] + ['electrostatics'] + ElectrostaticsList[KernelElec] + ModifierList[KernelElecMod]
            if [i,j] in vdwij:
                flags[i][j] = flags[i][j] + ['vdw'] + VdwList[KernelVdw] + ModifierList[KernelVdwMod]
            # Computing r requires rinv ...
            if 'r' in flags[i][j]:
                flags[i][j] = flags[i][j] + ['rinv']
            # ... and rinv or rinvsq requires rsq
            if 'rinv' in flags[i][j] or 'rinvsq' in flags[i][j]:
                flags[i][j] = flags[i][j] + ['rsq']

    defines['INTERACTION_FLAGS'] = flags
+
+
+
+def PrintStatistics(ratio):
+ ratio = 100.0*ratio
+ print '\rGenerating %s nonbonded kernels... %5.1f%%' % (Arch,ratio),
+ sys.stdout.flush()
+
+
+
# Shared state for the generation loop below:
#   defines    - preprocessor symbol table handed to gmxpreprocess() for each kernel
#   kerneldecl - accumulated entries for the kernel lookup table written at the end
defines = {}
kerneldecl = []

# Progress bookkeeping; cnt is a float so cnt/ntot below is a true ratio in Python 2
cnt = 0.0
nelec = len(ElectrostaticsList)
nVdw = len(VdwList)
nmod = len(ModifierList)
ngeom = len(GeometryNameList)

# Total number of (elec, elec-mod, vdw, vdw-mod, geometry) combinations visited
ntot = nelec*nmod*nVdw*nmod*ngeom

numKernels = 0

# Declaration file: one nb_kernel_t declaration per generated kernel plus the
# kernellist table at the end.
# NOTE(review): the file gets a .c extension but is written with an include
# guard like a header - confirm this is what the build system expects.
fpdecl = open('nb_kernel_' + Arch + '.c','w')
fpdecl.write( FileHeader )
fpdecl.write( '#ifndef nb_kernel_' + Arch + '_h\n' )
fpdecl.write( '#define nb_kernel_' + Arch + '_h\n\n' )
fpdecl.write( '#include "../nb_kernel.h"\n\n' )

# Main generation loop: enumerate every combination of electrostatics type,
# electrostatics modifier, Vdw type, Vdw modifier and geometry. All force/pot
# flavors (VFList) for one combination share a single output .c file.
for KernelElec in ElectrostaticsList:
    defines['KERNEL_ELEC'] = KernelElec

    for KernelElecMod in ModifierList:
        defines['KERNEL_MOD_ELEC'] = KernelElecMod

        for KernelVdw in VdwList:
            defines['KERNEL_VDW'] = KernelVdw

            for KernelVdwMod in ModifierList:
                defines['KERNEL_MOD_VDW'] = KernelVdwMod

                for KernelGeom in GeometryNameList:

                    # Progress is counted per geometry combination, whether or not
                    # any kernel is actually kept for it.
                    cnt += 1
                    KernelFilename = MakeKernelFileName(KernelElec,KernelElecMod,KernelVdw,KernelVdwMod,KernelGeom) + '.c'
                    fpkernel = open(KernelFilename,'w')
                    defines['INCLUDE_HEADER'] = 1   # Include header first time in new file
                    DoHeader = 1

                    for KernelVF in VFList:

                        KernelName = MakeKernelName(KernelElec,KernelElecMod,KernelVdw,KernelVdwMod,KernelGeom,KernelVF)

                        defines['KERNEL_NAME'] = KernelName
                        defines['KERNEL_VF']   = KernelVF

                        # Check if this is a valid/sane/usable combination
                        if not KeepKernel(KernelElec,KernelElecMod,KernelVdw,KernelVdwMod,KernelGeom,KernelVF):
                            continue;

                        # The overall kernel settings determine what the _kernel_ calculates, but for the water
                        # kernels this does not mean that every pairwise interaction has e.g. Vdw interactions.
                        # This routine sets defines of what to calculate for each pair of particles in those cases.
                        SetDefines(KernelElec,KernelElecMod,KernelVdw,KernelVdwMod,KernelGeom,KernelVF,defines)

                        if(DoHeader==1):
                            fpkernel.write( FileHeader )

                        # Expand the architecture template into a temporary file for this kernel
                        gmxpreprocess('nb_kernel_template_' + Arch + '.pre', KernelName+'.tmp' , defines, force=1,contentType='C')
                        numKernels = numKernels + 1

                        defines['INCLUDE_HEADER'] = 0   # Header has been included once now
                        DoHeader=0

                        # Append temp file contents to the common kernelfile
                        fptmp = open(KernelName+'.tmp','r')
                        fpkernel.writelines(fptmp.readlines())
                        fptmp.close()
                        os.remove(KernelName+'.tmp')

                        # Add a declaration for this kernel
                        fpdecl.write('nb_kernel_t ' + KernelName + ';\n');

                        # Add declaration to the buffer
                        KernelOther=''
                        kerneldecl.append(MakeKernelDecl(KernelName,KernelElec,KernelElecMod,KernelVdw,KernelVdwMod,KernelGeom,KernelOther,KernelVF))

                    # If no VF flavor was kept, nothing was ever written (not even
                    # the header) and the empty file is deleted again.
                    filesize = fpkernel.tell()
                    fpkernel.close()
                    if(filesize==0):
                        os.remove(KernelFilename)

                    PrintStatistics(cnt/ntot)
                    pass
                pass
            pass
        pass
    pass
pass

# Write out the list of settings and corresponding kernels to the declaration file
# NOTE(review): assumes at least one kernel was generated; kerneldecl[-1] raises
# IndexError on an empty list - confirm the settings tables can never be empty.
fpdecl.write( '\n\n' )
fpdecl.write( 'nb_kernel_info_t\n' )
fpdecl.write( 'kernellist_'+Arch+'[] =\n' )
fpdecl.write( '{\n' )
for decl in kerneldecl[0:-1]:
    fpdecl.write( decl + ',\n' )
fpdecl.write( kerneldecl[-1] + '\n' )
fpdecl.write( '};\n\n' )
fpdecl.write( 'int\n' )
fpdecl.write( 'kernellist_'+Arch+'_size = sizeof(kernellist_'+Arch+')/sizeof(kernellist_'+Arch+'[0]);\n\n')
fpdecl.write( '#endif\n')
fpdecl.close()
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwCSTab_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: CubicSplineTable
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
void
nb_kernel_ElecCSTab_VdwCSTab_GeomP1P1_VF_avx_256_single
                    (t_nblist * gmx_restrict nlist,
                     rvec * gmx_restrict xx,
                     rvec * gmx_restrict ff,
                     t_forcerec * gmx_restrict fr,
                     t_mdatoms * gmx_restrict mdatoms,
                     nb_kernel_data_t * gmx_restrict kernel_data,
                     t_nrnb * gmx_restrict nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
     * jnr indices corresponding to data put in the eight positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrE,jnrF,jnrG,jnrH;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
    /* scratch receives j forces for padded (dummy) neighbor entries so real
     * force arrays are never written for them */
    real             scratch[4*DIM];
    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
    __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
    __m256i          vfitab;
    __m128i          vfitab_lo,vfitab_hi;
    __m128i          ifour       = _mm_set1_epi32(4);
    __m256           rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
    real             *vftab;
    __m256           dummy_mask,cutoff_mask;
    /* sign-bit constant: XORing with it negates a packed float */
    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
    __m256           one     = _mm256_set1_ps(1.0);
    __m256           two     = _mm256_set1_ps(2.0);
    x               = xx[0];
    f               = ff[0];

    /* Unpack the neighborlist and force-record data into plain arrays/scalars */
    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_ps(fr->epsfac);   /* broadcast electrostatics prefactor */
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;

    vftab            = kernel_data->table_elec_vdw->data;
    vftabscale       = _mm256_set1_ps(kernel_data->table_elec_vdw->scale);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;
    j_coord_offsetE = 0;
    j_coord_offsetF = 0;
    j_coord_offsetG = 0;
    j_coord_offsetH = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);

        fix0             = _mm256_setzero_ps();
        fiy0             = _mm256_setzero_ps();
        fiz0             = _mm256_setzero_ps();

        /* Load parameters for i particles */
        iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
        /* Row of the Vdw parameter matrix for this i type (c6,c12 pairs, hence 2*) */
        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];

        /* Reset potential sums */
        velecsum         = _mm256_setzero_ps();
        vvdwsum          = _mm256_setzero_ps();

        /* Start inner kernel loop; this main loop only handles full groups of
         * eight real j atoms (jjnr[jidx+7]>=0); the remainder - where the list
         * is presumably padded with negative indices (TODO confirm against the
         * neighborlist builder) - is handled by the masked epilogue below.
         */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            jnrE             = jjnr[jidx+4];
            jnrF             = jjnr[jidx+5];
            jnrG             = jjnr[jidx+6];
            jnrH             = jjnr[jidx+7];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
                                                               charge+jnrC+0,charge+jnrD+0,
                                                               charge+jnrE+0,charge+jnrF+0,
                                                               charge+jnrG+0,charge+jnrH+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];
            vdwjidx0E        = 2*vdwtype[jnrE+0];
            vdwjidx0F        = 2*vdwtype[jnrF+0];
            vdwjidx0G        = 2*vdwtype[jnrG+0];
            vdwjidx0H        = 2*vdwtype[jnrH+0];

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* r = rsq * 1/r */
            r00              = _mm256_mul_ps(rsq00,rinv00);

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_ps(iq0,jq0);
            gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            vdwioffsetptr0+vdwjidx0E,
                                            vdwioffsetptr0+vdwjidx0F,
                                            vdwioffsetptr0+vdwjidx0G,
                                            vdwioffsetptr0+vdwjidx0H,
                                            &c6_00,&c12_00);

            /* Calculate table index by multiplying r with table scale and truncate to integer */
            rt               = _mm256_mul_ps(r00,vftabscale);
            vfitab           = _mm256_cvttps_epi32(rt);
            /* vfeps = fractional part of rt, used for the spline evaluation */
            vfeps            = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
            /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
            vfitab_lo        = _mm256_extractf128_si256(vfitab,0x0);
            vfitab_hi        = _mm256_extractf128_si256(vfitab,0x1);
            /* idx *= 12 via (idx + 2*idx) << 2: each table point holds three
             * sub-tables (elec, dispersion, repulsion) of four spline
             * coefficients (Y,F,G,H) each.
             */
            vfitab_lo        = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
            vfitab_hi        = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);

            /* CUBIC SPLINE TABLE ELECTROSTATICS */
            Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
            F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
            G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
            H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
            /* Horner-style cubic spline: V = Y + eps*Fp, F' = Fp + eps*(G + 2*Heps) */
            Heps             = _mm256_mul_ps(vfeps,H);
            Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
            VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
            velec            = _mm256_mul_ps(qq00,VV);
            FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
            felec            = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));

            /* CUBIC SPLINE TABLE DISPERSION */
            /* advance to the next sub-table (4 floats further) */
            vfitab_lo        = _mm_add_epi32(vfitab_lo,ifour);
            vfitab_hi        = _mm_add_epi32(vfitab_hi,ifour);
            Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
            F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
            G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
            H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
            Heps             = _mm256_mul_ps(vfeps,H);
            Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
            VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
            vvdw6            = _mm256_mul_ps(c6_00,VV);
            FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
            fvdw6            = _mm256_mul_ps(c6_00,FF);

            /* CUBIC SPLINE TABLE REPULSION */
            vfitab_lo        = _mm_add_epi32(vfitab_lo,ifour);
            vfitab_hi        = _mm_add_epi32(vfitab_hi,ifour);
            Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
            F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
            G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
            H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
            Heps             = _mm256_mul_ps(vfeps,H);
            Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
            VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
            vvdw12           = _mm256_mul_ps(c12_00,VV);
            FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
            fvdw12           = _mm256_mul_ps(c12_00,FF);
            vvdw             = _mm256_add_ps(vvdw12,vvdw6);
            fvdw             = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velecsum         = _mm256_add_ps(velecsum,velec);
            vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);

            fscal            = _mm256_add_ps(felec,fvdw);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            fjptrE             = f+j_coord_offsetE;
            fjptrF             = f+j_coord_offsetF;
            fjptrG             = f+j_coord_offsetG;
            fjptrH             = f+j_coord_offsetH;
            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);

            /* Inner loop uses 73 flops */
        }

        /* Masked epilogue for the final, partially filled group of j atoms */
        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
             */
            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));

            /* Clamp dummy indices to atom 0 so the (masked-out) loads stay in bounds */
            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
                                                               charge+jnrC+0,charge+jnrD+0,
                                                               charge+jnrE+0,charge+jnrF+0,
                                                               charge+jnrG+0,charge+jnrH+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];
            vdwjidx0E        = 2*vdwtype[jnrE+0];
            vdwjidx0F        = 2*vdwtype[jnrF+0];
            vdwjidx0G        = 2*vdwtype[jnrG+0];
            vdwjidx0H        = 2*vdwtype[jnrH+0];

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            r00              = _mm256_mul_ps(rsq00,rinv00);
            /* Zero r for dummy lanes so the table index below stays valid */
            r00              = _mm256_andnot_ps(dummy_mask,r00);

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_ps(iq0,jq0);
            gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            vdwioffsetptr0+vdwjidx0E,
                                            vdwioffsetptr0+vdwjidx0F,
                                            vdwioffsetptr0+vdwjidx0G,
                                            vdwioffsetptr0+vdwjidx0H,
                                            &c6_00,&c12_00);

            /* Calculate table index by multiplying r with table scale and truncate to integer */
            rt               = _mm256_mul_ps(r00,vftabscale);
            vfitab           = _mm256_cvttps_epi32(rt);
            vfeps            = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
            /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
            vfitab_lo        = _mm256_extractf128_si256(vfitab,0x0);
            vfitab_hi        = _mm256_extractf128_si256(vfitab,0x1);
            /* idx *= 12, see main loop above */
            vfitab_lo        = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
            vfitab_hi        = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);

            /* CUBIC SPLINE TABLE ELECTROSTATICS */
            Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
            F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
            G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
            H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
            Heps             = _mm256_mul_ps(vfeps,H);
            Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
            VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
            velec            = _mm256_mul_ps(qq00,VV);
            FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
            felec            = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));

            /* CUBIC SPLINE TABLE DISPERSION */
            vfitab_lo        = _mm_add_epi32(vfitab_lo,ifour);
            vfitab_hi        = _mm_add_epi32(vfitab_hi,ifour);
            Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
            F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
            G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
            H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
            Heps             = _mm256_mul_ps(vfeps,H);
            Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
            VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
            vvdw6            = _mm256_mul_ps(c6_00,VV);
            FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
            fvdw6            = _mm256_mul_ps(c6_00,FF);

            /* CUBIC SPLINE TABLE REPULSION */
            vfitab_lo        = _mm_add_epi32(vfitab_lo,ifour);
            vfitab_hi        = _mm_add_epi32(vfitab_hi,ifour);
            Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
            F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
            G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
            H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
                                                 _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
            Heps             = _mm256_mul_ps(vfeps,H);
            Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
            VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
            vvdw12           = _mm256_mul_ps(c12_00,VV);
            FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
            fvdw12           = _mm256_mul_ps(c12_00,FF);
            vvdw             = _mm256_add_ps(vvdw12,vvdw6);
            fvdw             = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));

            /* Update potential sum for this i atom from the interaction with this j atom. */
            /* Dummy lanes are masked out of the potential and force accumulation */
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);
            vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
            vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);

            fscal            = _mm256_add_ps(felec,fvdw);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            /* Dummy-lane j forces are dumped into the local scratch buffer */
            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);

            /* Inner loop uses 74 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
                                                 f+i_coord_offset,fshift+i_shift_offset);

        ggid                        = gid[iidx];
        /* Update potential energies */
        gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
        gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 9 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*74);
}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwCSTab_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: CubicSplineTable
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCSTab_VdwCSTab_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_elec_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec_vdw->scale);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 61 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 62 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*62);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwCSTab_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCSTab_VdwCSTab_GeomW3P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_elec_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq10,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq20,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 162 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq10,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq20,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 165 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*165);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwCSTab_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCSTab_VdwCSTab_GeomW3P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_elec_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 142 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 145 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*145);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwCSTab_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCSTab_VdwCSTab_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_elec_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r01,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq01,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq01,FF),_mm256_mul_ps(vftabscale,rinv01)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r02,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq02,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq02,FF),_mm256_mul_ps(vftabscale,rinv02)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq10,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq11,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq12,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq20,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq21,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq22,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 417 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r01,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq01,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq01,FF),_mm256_mul_ps(vftabscale,rinv01)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r02,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq02,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq02,FF),_mm256_mul_ps(vftabscale,rinv02)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq10,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq11,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq12,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq20,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq21,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq22,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 426 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*426);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwCSTab_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCSTab_VdwCSTab_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_elec_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r01,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq01,FF),_mm256_mul_ps(vftabscale,rinv01)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r02,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq02,FF),_mm256_mul_ps(vftabscale,rinv02)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 373 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+             */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r01,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq01,FF),_mm256_mul_ps(vftabscale,rinv01)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r02,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq02,FF),_mm256_mul_ps(vftabscale,rinv02)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 382 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*382);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwCSTab_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCSTab_VdwCSTab_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_elec_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq10,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq20,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r30,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq30,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq30,FF),_mm256_mul_ps(vftabscale,rinv30)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 188 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq10,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq20,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r30,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq30,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq30,FF),_mm256_mul_ps(vftabscale,rinv30)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 192 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*192);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwCSTab_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ *
+ * Force-only variant: evaluates Coulomb, dispersion and repulsion from one
+ * combined cubic-spline table (12 floats per table point: a Y/F/G/H
+ * quadruplet for each of the three interactions) between the four sites of
+ * water molecule i and single j particles, eight j atoms per AVX pass.
+ * Site 0 interacts only through VdW; sites 1-3 only through charge.
+ */
+void
+nb_kernel_ElecCSTab_VdwCSTab_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* Force sink for masked-out (dummy) j entries in the epilogue loop */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ /* Offset between the elec/disp/rep sub-tables of one table point */
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_elec_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ /* Full-width passes: run only while all eight j indices are real (>=0) */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ /* Scale index by 12 = 3*4 via shifts: three sub-tables of four spline coefficients per point */
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r30,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq30,FF),_mm256_mul_ps(vftabscale,rinv30)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 168 flops */
+ }
+
+ /* Epilogue: last partial SIMD pass (<8 real j atoms), handled with dummy_mask */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ /* Zero r for dummy lanes so table indices stay in range */
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ /* Scale index by 12 = 3*4 via shifts: three sub-tables of four spline coefficients per point */
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r30,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq30,FF),_mm256_mul_ps(vftabscale,rinv30)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /* Dummy lanes scatter their forces into the scratch sink instead of f */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 172 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*172);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwCSTab_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCSTab_VdwCSTab_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_elec_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq11,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq12,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r13,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq13,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq13,FF),_mm256_mul_ps(vftabscale,rinv13)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq21,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq22,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r23,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq23,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq23,FF),_mm256_mul_ps(vftabscale,rinv23)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r31,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq31,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq31,FF),_mm256_mul_ps(vftabscale,rinv31)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r32,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq32,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq32,FF),_mm256_mul_ps(vftabscale,rinv32)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r33,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq33,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq33,FF),_mm256_mul_ps(vftabscale,rinv33)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 446 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq11,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq12,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r13,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq13,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq13,FF),_mm256_mul_ps(vftabscale,rinv13)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq21,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq22,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r23,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq23,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq23,FF),_mm256_mul_ps(vftabscale,rinv23)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r31,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq31,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq31,FF),_mm256_mul_ps(vftabscale,rinv31)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r32,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq32,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq32,FF),_mm256_mul_ps(vftabscale,rinv32)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r33,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq33,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq33,FF),_mm256_mul_ps(vftabscale,rinv33)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 456 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*456);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwCSTab_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCSTab_VdwCSTab_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_elec_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r13,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq13,FF),_mm256_mul_ps(vftabscale,rinv13)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r23,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq23,FF),_mm256_mul_ps(vftabscale,rinv23)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r31,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq31,FF),_mm256_mul_ps(vftabscale,rinv31)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r32,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq32,FF),_mm256_mul_ps(vftabscale,rinv32)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r33,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq33,FF),_mm256_mul_ps(vftabscale,rinv33)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 402 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+             */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r13,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq13,FF),_mm256_mul_ps(vftabscale,rinv13)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r23,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq23,FF),_mm256_mul_ps(vftabscale,rinv23)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r31,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq31,FF),_mm256_mul_ps(vftabscale,rinv31)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r32,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq32,FF),_mm256_mul_ps(vftabscale,rinv32)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r33,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq33,FF),_mm256_mul_ps(vftabscale,rinv33)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 412 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*412);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwLJ_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCSTab_VdwLJ_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 56 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 57 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 9 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*57);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwLJ_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCSTab_VdwLJ_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Force-only variant of the kernel above: cubic-spline-table electrostatics
+ * + Lennard-Jones 6-12, particle-particle geometry, 8 j-atoms per AVX
+ * iteration. No potential-energy accumulation is performed here.
+ */
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ /* scratch is a dump area for force contributions of masked/dummy j entries */
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Start inner kernel loop */
+ /* Full-width iterations only: jjnr[jidx+7]>=0 means all 8 entries are
+ * real atoms; the trailing partial group is handled by the masked
+ * epilogue below.
+ */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* r = rsq * (1/r), avoiding a separate sqrt */
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ /* Shift index left by 2 (multiply by 4): each table point stores four floats (Y,F,G,H) */
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ /* Only the spline derivative FF is needed here (no potential output) */
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ /* Scalar force is -qq*FF*tabscale/r; the sign flip is done by XOR with the sign bit */
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ /* Combined LJ force: (c12/r^6 - c6) * r^-6 * r^-2 = c12/r^14 - c6/r^8 */
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 47 flops */
+ }
+
+ /* Epilogue: last partial group of up to 8 j atoms; padded (negative index)
+ * entries are neutralized through dummy_mask and the scratch force buffer.
+ */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Clamp dummy indices to 0 so the loads below stay in bounds */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ /* Zero r for dummy entries so the table index below stays valid */
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ /* Shift index left by 2 (multiply by 4): each table point stores four floats (Y,F,G,H) */
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ /* Dummy entries write their (already-zeroed) forces to scratch, not to real atoms */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 48 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*48);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwLJ_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCSTab_VdwLJ_GeomW3P1_VF_avx_256_single
+                    (t_nblist                    * gmx_restrict       nlist,
+                     rvec                        * gmx_restrict          xx,
+                     rvec                        * gmx_restrict          ff,
+                     t_forcerec                  * gmx_restrict          fr,
+                     t_mdatoms                   * gmx_restrict     mdatoms,
+                     nb_kernel_data_t            * gmx_restrict kernel_data,
+                     t_nrnb                      * gmx_restrict        nrnb)
+{
+    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
+     * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the four positions in the SIMD register.
+     */
+    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
+    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+    int              jnrA,jnrB,jnrC,jnrD;
+    int              jnrE,jnrF,jnrG,jnrH;
+    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
+    real             rcutoff_scalar;
+    real             *shiftvec,*fshift,*x,*f;
+    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+    real             scratch[4*DIM];
+    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+    real *           vdwioffsetptr0;
+    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+    real *           vdwioffsetptr1;
+    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+    real *           vdwioffsetptr2;
+    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+    __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+    __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
+    real             *charge;
+    int              nvdwtype;
+    __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+    int              *vdwtype;
+    real             *vdwparam;
+    __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
+    __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
+    __m256i          vfitab;
+    __m128i          vfitab_lo,vfitab_hi;
+    __m128i          ifour       = _mm_set1_epi32(4);
+    __m256           rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+    real             *vftab;
+    __m256           dummy_mask,cutoff_mask;
+    /* sign-bit mask: xor with this flips the sign of a float, used to negate the
+     * table-derived scalar force without a multiply. */
+    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+    __m256           one     = _mm256_set1_ps(1.0);
+    __m256           two     = _mm256_set1_ps(2.0);
+    x                = xx[0];
+    f                = ff[0];
+
+    nri              = nlist->nri;
+    iinr             = nlist->iinr;
+    jindex           = nlist->jindex;
+    jjnr             = nlist->jjnr;
+    shiftidx         = nlist->shift;
+    gid              = nlist->gid;
+    shiftvec         = fr->shift_vec[0];
+    fshift           = fr->fshift[0];
+    facel            = _mm256_set1_ps(fr->epsfac);
+    charge           = mdatoms->chargeA;
+    nvdwtype         = fr->ntype;
+    vdwparam         = fr->nbfp;
+    vdwtype          = mdatoms->typeA;
+
+    vftab            = kernel_data->table_elec->data;
+    vftabscale       = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+    /* Setup water-specific parameters */
+    inr              = nlist->iinr[0];
+    iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+    /* Avoid stupid compiler warnings */
+    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+    j_coord_offsetA = 0;
+    j_coord_offsetB = 0;
+    j_coord_offsetC = 0;
+    j_coord_offsetD = 0;
+    j_coord_offsetE = 0;
+    j_coord_offsetF = 0;
+    j_coord_offsetG = 0;
+    j_coord_offsetH = 0;
+
+    outeriter        = 0;
+    inneriter        = 0;
+
+    for(iidx=0;iidx<4*DIM;iidx++)
+    {
+        scratch[iidx] = 0.0;
+    }
+
+    /* Start outer loop over neighborlists */
+    for(iidx=0; iidx<nri; iidx++)
+    {
+        /* Load shift vector for this list */
+        i_shift_offset   = DIM*shiftidx[iidx];
+
+        /* Load limits for loop over neighbors */
+        j_index_start    = jindex[iidx];
+        j_index_end      = jindex[iidx+1];
+
+        /* Get outer coordinate index */
+        inr              = iinr[iidx];
+        i_coord_offset   = DIM*inr;
+
+        /* Load i particle coords and add shift vector */
+        gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+        fix0             = _mm256_setzero_ps();
+        fiy0             = _mm256_setzero_ps();
+        fiz0             = _mm256_setzero_ps();
+        fix1             = _mm256_setzero_ps();
+        fiy1             = _mm256_setzero_ps();
+        fiz1             = _mm256_setzero_ps();
+        fix2             = _mm256_setzero_ps();
+        fiy2             = _mm256_setzero_ps();
+        fiz2             = _mm256_setzero_ps();
+
+        /* Reset potential sums */
+        velecsum         = _mm256_setzero_ps();
+        vvdwsum          = _mm256_setzero_ps();
+
+        /* Start inner kernel loop */
+        /* Main loop: consume 8 j-atoms per iteration while all 8 list entries are
+         * real atoms (index >= 0); the trailing partial group is handled below. */
+        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+        {
+
+            /* Get j neighbor index, and coordinate index */
+            jnrA             = jjnr[jidx];
+            jnrB             = jjnr[jidx+1];
+            jnrC             = jjnr[jidx+2];
+            jnrD             = jjnr[jidx+3];
+            jnrE             = jjnr[jidx+4];
+            jnrF             = jjnr[jidx+5];
+            jnrG             = jjnr[jidx+6];
+            jnrH             = jjnr[jidx+7];
+            j_coord_offsetA  = DIM*jnrA;
+            j_coord_offsetB  = DIM*jnrB;
+            j_coord_offsetC  = DIM*jnrC;
+            j_coord_offsetD  = DIM*jnrD;
+            j_coord_offsetE  = DIM*jnrE;
+            j_coord_offsetF  = DIM*jnrF;
+            j_coord_offsetG  = DIM*jnrG;
+            j_coord_offsetH  = DIM*jnrH;
+
+            /* load j atom coordinates */
+            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+                                                 x+j_coord_offsetC,x+j_coord_offsetD,
+                                                 x+j_coord_offsetE,x+j_coord_offsetF,
+                                                 x+j_coord_offsetG,x+j_coord_offsetH,
+                                                 &jx0,&jy0,&jz0);
+
+            /* Calculate displacement vector */
+            dx00             = _mm256_sub_ps(ix0,jx0);
+            dy00             = _mm256_sub_ps(iy0,jy0);
+            dz00             = _mm256_sub_ps(iz0,jz0);
+            dx10             = _mm256_sub_ps(ix1,jx0);
+            dy10             = _mm256_sub_ps(iy1,jy0);
+            dz10             = _mm256_sub_ps(iz1,jz0);
+            dx20             = _mm256_sub_ps(ix2,jx0);
+            dy20             = _mm256_sub_ps(iy2,jy0);
+            dz20             = _mm256_sub_ps(iz2,jz0);
+
+            /* Calculate squared distance and things based on it */
+            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
+            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
+            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
+
+            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
+
+            /* Load parameters for j particles */
+            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+                                                                 charge+jnrC+0,charge+jnrD+0,
+                                                                 charge+jnrE+0,charge+jnrF+0,
+                                                                 charge+jnrG+0,charge+jnrH+0);
+            vdwjidx0A        = 2*vdwtype[jnrA+0];
+            vdwjidx0B        = 2*vdwtype[jnrB+0];
+            vdwjidx0C        = 2*vdwtype[jnrC+0];
+            vdwjidx0D        = 2*vdwtype[jnrD+0];
+            vdwjidx0E        = 2*vdwtype[jnrE+0];
+            vdwjidx0F        = 2*vdwtype[jnrF+0];
+            vdwjidx0G        = 2*vdwtype[jnrG+0];
+            vdwjidx0H        = 2*vdwtype[jnrH+0];
+
+            fjx0             = _mm256_setzero_ps();
+            fjy0             = _mm256_setzero_ps();
+            fjz0             = _mm256_setzero_ps();
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            r00              = _mm256_mul_ps(rsq00,rinv00);
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq00             = _mm256_mul_ps(iq0,jq0);
+            gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+                                            vdwioffsetptr0+vdwjidx0B,
+                                            vdwioffsetptr0+vdwjidx0C,
+                                            vdwioffsetptr0+vdwjidx0D,
+                                            vdwioffsetptr0+vdwjidx0E,
+                                            vdwioffsetptr0+vdwjidx0F,
+                                            vdwioffsetptr0+vdwjidx0G,
+                                            vdwioffsetptr0+vdwjidx0H,
+                                            &c6_00,&c12_00);
+
+            /* Calculate table index by multiplying r with table scale and truncate to integer */
+            rt               = _mm256_mul_ps(r00,vftabscale);
+            vfitab           = _mm256_cvttps_epi32(rt);
+            vfeps            = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+            /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+            vfitab_lo        = _mm256_extractf128_si256(vfitab,0x0);
+            vfitab_hi        = _mm256_extractf128_si256(vfitab,0x1);
+            /* Each table point stores 4 floats (Y,F,G,H), so convert the point index
+             * to a float offset by shifting left 2 bits (multiply by 4). */
+            vfitab_lo        = _mm_slli_epi32(vfitab_lo,2);
+            vfitab_hi        = _mm_slli_epi32(vfitab_hi,2);
+
+            /* CUBIC SPLINE TABLE ELECTROSTATICS */
+            Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+            F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+            G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+            H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+            Heps             = _mm256_mul_ps(vfeps,H);
+            Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+            VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+            velec            = _mm256_mul_ps(qq00,VV);
+            FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+            felec            = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+            /* LENNARD-JONES DISPERSION/REPULSION */
+
+            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+            vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
+            vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+            vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+            fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+            /* Update potential sum for this i atom from the interaction with this j atom. */
+            velecsum         = _mm256_add_ps(velecsum,velec);
+            vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
+
+            fscal            = _mm256_add_ps(felec,fvdw);
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx00);
+            ty               = _mm256_mul_ps(fscal,dy00);
+            tz               = _mm256_mul_ps(fscal,dz00);
+
+            /* Update vectorial force */
+            fix0             = _mm256_add_ps(fix0,tx);
+            fiy0             = _mm256_add_ps(fiy0,ty);
+            fiz0             = _mm256_add_ps(fiz0,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            r10              = _mm256_mul_ps(rsq10,rinv10);
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq10             = _mm256_mul_ps(iq1,jq0);
+
+            /* Calculate table index by multiplying r with table scale and truncate to integer */
+            rt               = _mm256_mul_ps(r10,vftabscale);
+            vfitab           = _mm256_cvttps_epi32(rt);
+            vfeps            = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+            /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+            vfitab_lo        = _mm256_extractf128_si256(vfitab,0x0);
+            vfitab_hi        = _mm256_extractf128_si256(vfitab,0x1);
+            vfitab_lo        = _mm_slli_epi32(vfitab_lo,2);
+            vfitab_hi        = _mm_slli_epi32(vfitab_hi,2);
+
+            /* CUBIC SPLINE TABLE ELECTROSTATICS */
+            Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+            F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+            G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+            H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+            Heps             = _mm256_mul_ps(vfeps,H);
+            Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+            VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+            velec            = _mm256_mul_ps(qq10,VV);
+            FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+            felec            = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+            /* Update potential sum for this i atom from the interaction with this j atom. */
+            velecsum         = _mm256_add_ps(velecsum,velec);
+
+            fscal            = felec;
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx10);
+            ty               = _mm256_mul_ps(fscal,dy10);
+            tz               = _mm256_mul_ps(fscal,dz10);
+
+            /* Update vectorial force */
+            fix1             = _mm256_add_ps(fix1,tx);
+            fiy1             = _mm256_add_ps(fiy1,ty);
+            fiz1             = _mm256_add_ps(fiz1,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            r20              = _mm256_mul_ps(rsq20,rinv20);
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq20             = _mm256_mul_ps(iq2,jq0);
+
+            /* Calculate table index by multiplying r with table scale and truncate to integer */
+            rt               = _mm256_mul_ps(r20,vftabscale);
+            vfitab           = _mm256_cvttps_epi32(rt);
+            vfeps            = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+            /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+            vfitab_lo        = _mm256_extractf128_si256(vfitab,0x0);
+            vfitab_hi        = _mm256_extractf128_si256(vfitab,0x1);
+            vfitab_lo        = _mm_slli_epi32(vfitab_lo,2);
+            vfitab_hi        = _mm_slli_epi32(vfitab_hi,2);
+
+            /* CUBIC SPLINE TABLE ELECTROSTATICS */
+            Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+            F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+            G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+            H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+            Heps             = _mm256_mul_ps(vfeps,H);
+            Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+            VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+            velec            = _mm256_mul_ps(qq20,VV);
+            FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+            felec            = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+            /* Update potential sum for this i atom from the interaction with this j atom. */
+            velecsum         = _mm256_add_ps(velecsum,velec);
+
+            fscal            = felec;
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx20);
+            ty               = _mm256_mul_ps(fscal,dy20);
+            tz               = _mm256_mul_ps(fscal,dz20);
+
+            /* Update vectorial force */
+            fix2             = _mm256_add_ps(fix2,tx);
+            fiy2             = _mm256_add_ps(fiy2,ty);
+            fiz2             = _mm256_add_ps(fiz2,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            fjptrA             = f+j_coord_offsetA;
+            fjptrB             = f+j_coord_offsetB;
+            fjptrC             = f+j_coord_offsetC;
+            fjptrD             = f+j_coord_offsetD;
+            fjptrE             = f+j_coord_offsetE;
+            fjptrF             = f+j_coord_offsetF;
+            fjptrG             = f+j_coord_offsetG;
+            fjptrH             = f+j_coord_offsetH;
+
+            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+            /* Inner loop uses 145 flops */
+        }
+
+        /* Epilogue: handle the final partial group of fewer than 8 j-atoms.
+         * Negative list entries are padding; dummy_mask zeroes their
+         * contributions and their forces are redirected to scratch. */
+        if(jidx<j_index_end)
+        {
+
+            /* Get j neighbor index, and coordinate index */
+            jnrlistA         = jjnr[jidx];
+            jnrlistB         = jjnr[jidx+1];
+            jnrlistC         = jjnr[jidx+2];
+            jnrlistD         = jjnr[jidx+3];
+            jnrlistE         = jjnr[jidx+4];
+            jnrlistF         = jjnr[jidx+5];
+            jnrlistG         = jjnr[jidx+6];
+            jnrlistH         = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+             */
+            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+                                            
+            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
+            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
+            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
+            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
+            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
+            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
+            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
+            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
+            j_coord_offsetA  = DIM*jnrA;
+            j_coord_offsetB  = DIM*jnrB;
+            j_coord_offsetC  = DIM*jnrC;
+            j_coord_offsetD  = DIM*jnrD;
+            j_coord_offsetE  = DIM*jnrE;
+            j_coord_offsetF  = DIM*jnrF;
+            j_coord_offsetG  = DIM*jnrG;
+            j_coord_offsetH  = DIM*jnrH;
+
+            /* load j atom coordinates */
+            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+                                                 x+j_coord_offsetC,x+j_coord_offsetD,
+                                                 x+j_coord_offsetE,x+j_coord_offsetF,
+                                                 x+j_coord_offsetG,x+j_coord_offsetH,
+                                                 &jx0,&jy0,&jz0);
+
+            /* Calculate displacement vector */
+            dx00             = _mm256_sub_ps(ix0,jx0);
+            dy00             = _mm256_sub_ps(iy0,jy0);
+            dz00             = _mm256_sub_ps(iz0,jz0);
+            dx10             = _mm256_sub_ps(ix1,jx0);
+            dy10             = _mm256_sub_ps(iy1,jy0);
+            dz10             = _mm256_sub_ps(iz1,jz0);
+            dx20             = _mm256_sub_ps(ix2,jx0);
+            dy20             = _mm256_sub_ps(iy2,jy0);
+            dz20             = _mm256_sub_ps(iz2,jz0);
+
+            /* Calculate squared distance and things based on it */
+            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
+            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
+            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
+
+            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
+
+            /* Load parameters for j particles */
+            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+                                                                 charge+jnrC+0,charge+jnrD+0,
+                                                                 charge+jnrE+0,charge+jnrF+0,
+                                                                 charge+jnrG+0,charge+jnrH+0);
+            vdwjidx0A        = 2*vdwtype[jnrA+0];
+            vdwjidx0B        = 2*vdwtype[jnrB+0];
+            vdwjidx0C        = 2*vdwtype[jnrC+0];
+            vdwjidx0D        = 2*vdwtype[jnrD+0];
+            vdwjidx0E        = 2*vdwtype[jnrE+0];
+            vdwjidx0F        = 2*vdwtype[jnrF+0];
+            vdwjidx0G        = 2*vdwtype[jnrG+0];
+            vdwjidx0H        = 2*vdwtype[jnrH+0];
+
+            fjx0             = _mm256_setzero_ps();
+            fjy0             = _mm256_setzero_ps();
+            fjz0             = _mm256_setzero_ps();
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            r00              = _mm256_mul_ps(rsq00,rinv00);
+            /* Zero r for dummy entries so the table index stays in range. */
+            r00              = _mm256_andnot_ps(dummy_mask,r00);
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq00             = _mm256_mul_ps(iq0,jq0);
+            gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+                                            vdwioffsetptr0+vdwjidx0B,
+                                            vdwioffsetptr0+vdwjidx0C,
+                                            vdwioffsetptr0+vdwjidx0D,
+                                            vdwioffsetptr0+vdwjidx0E,
+                                            vdwioffsetptr0+vdwjidx0F,
+                                            vdwioffsetptr0+vdwjidx0G,
+                                            vdwioffsetptr0+vdwjidx0H,
+                                            &c6_00,&c12_00);
+
+            /* Calculate table index by multiplying r with table scale and truncate to integer */
+            rt               = _mm256_mul_ps(r00,vftabscale);
+            vfitab           = _mm256_cvttps_epi32(rt);
+            vfeps            = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+            /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+            vfitab_lo        = _mm256_extractf128_si256(vfitab,0x0);
+            vfitab_hi        = _mm256_extractf128_si256(vfitab,0x1);
+            vfitab_lo        = _mm_slli_epi32(vfitab_lo,2);
+            vfitab_hi        = _mm_slli_epi32(vfitab_hi,2);
+
+            /* CUBIC SPLINE TABLE ELECTROSTATICS */
+            Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+            F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+            G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+            H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+            Heps             = _mm256_mul_ps(vfeps,H);
+            Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+            VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+            velec            = _mm256_mul_ps(qq00,VV);
+            FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+            felec            = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+            /* LENNARD-JONES DISPERSION/REPULSION */
+
+            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+            vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
+            vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+            vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+            fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+            /* Update potential sum for this i atom from the interaction with this j atom. */
+            velec            = _mm256_andnot_ps(dummy_mask,velec);
+            velecsum         = _mm256_add_ps(velecsum,velec);
+            vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
+            vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
+
+            fscal            = _mm256_add_ps(felec,fvdw);
+
+            fscal            = _mm256_andnot_ps(dummy_mask,fscal);
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx00);
+            ty               = _mm256_mul_ps(fscal,dy00);
+            tz               = _mm256_mul_ps(fscal,dz00);
+
+            /* Update vectorial force */
+            fix0             = _mm256_add_ps(fix0,tx);
+            fiy0             = _mm256_add_ps(fiy0,ty);
+            fiz0             = _mm256_add_ps(fiz0,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            r10              = _mm256_mul_ps(rsq10,rinv10);
+            r10              = _mm256_andnot_ps(dummy_mask,r10);
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq10             = _mm256_mul_ps(iq1,jq0);
+
+            /* Calculate table index by multiplying r with table scale and truncate to integer */
+            rt               = _mm256_mul_ps(r10,vftabscale);
+            vfitab           = _mm256_cvttps_epi32(rt);
+            vfeps            = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+            /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+            vfitab_lo        = _mm256_extractf128_si256(vfitab,0x0);
+            vfitab_hi        = _mm256_extractf128_si256(vfitab,0x1);
+            vfitab_lo        = _mm_slli_epi32(vfitab_lo,2);
+            vfitab_hi        = _mm_slli_epi32(vfitab_hi,2);
+
+            /* CUBIC SPLINE TABLE ELECTROSTATICS */
+            Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+            F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+            G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+            H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+            Heps             = _mm256_mul_ps(vfeps,H);
+            Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+            VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+            velec            = _mm256_mul_ps(qq10,VV);
+            FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+            felec            = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+            /* Update potential sum for this i atom from the interaction with this j atom. */
+            velec            = _mm256_andnot_ps(dummy_mask,velec);
+            velecsum         = _mm256_add_ps(velecsum,velec);
+
+            fscal            = felec;
+
+            fscal            = _mm256_andnot_ps(dummy_mask,fscal);
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx10);
+            ty               = _mm256_mul_ps(fscal,dy10);
+            tz               = _mm256_mul_ps(fscal,dz10);
+
+            /* Update vectorial force */
+            fix1             = _mm256_add_ps(fix1,tx);
+            fiy1             = _mm256_add_ps(fiy1,ty);
+            fiz1             = _mm256_add_ps(fiz1,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            r20              = _mm256_mul_ps(rsq20,rinv20);
+            r20              = _mm256_andnot_ps(dummy_mask,r20);
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq20             = _mm256_mul_ps(iq2,jq0);
+
+            /* Calculate table index by multiplying r with table scale and truncate to integer */
+            rt               = _mm256_mul_ps(r20,vftabscale);
+            vfitab           = _mm256_cvttps_epi32(rt);
+            vfeps            = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+            /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+            vfitab_lo        = _mm256_extractf128_si256(vfitab,0x0);
+            vfitab_hi        = _mm256_extractf128_si256(vfitab,0x1);
+            vfitab_lo        = _mm_slli_epi32(vfitab_lo,2);
+            vfitab_hi        = _mm_slli_epi32(vfitab_hi,2);
+
+            /* CUBIC SPLINE TABLE ELECTROSTATICS */
+            Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+            F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+            G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+            H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+                                                  _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+            Heps             = _mm256_mul_ps(vfeps,H);
+            Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+            VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+            velec            = _mm256_mul_ps(qq20,VV);
+            FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+            felec            = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+            /* Update potential sum for this i atom from the interaction with this j atom. */
+            velec            = _mm256_andnot_ps(dummy_mask,velec);
+            velecsum         = _mm256_add_ps(velecsum,velec);
+
+            fscal            = felec;
+
+            fscal            = _mm256_andnot_ps(dummy_mask,fscal);
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx20);
+            ty               = _mm256_mul_ps(fscal,dy20);
+            tz               = _mm256_mul_ps(fscal,dz20);
+
+            /* Update vectorial force */
+            fix2             = _mm256_add_ps(fix2,tx);
+            fiy2             = _mm256_add_ps(fiy2,ty);
+            fiz2             = _mm256_add_ps(fiz2,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /* Dummy entries scatter their (zeroed) force into scratch instead of f[]. */
+            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+            /* Inner loop uses 148 flops */
+        }
+
+        /* End of innermost loop */
+
+        gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+                                                 f+i_coord_offset,fshift+i_shift_offset);
+
+        ggid                        = gid[iidx];
+        /* Update potential energies */
+        gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+        gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+        /* Increment number of inner iterations */
+        inneriter                  += j_index_end - j_index_start;
+
+        /* Outer loop uses 20 flops */
+    }
+
+    /* Increment number of outer iterations */
+    outeriter        += nri;
+
+    /* Update outer/inner flops */
+
+    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*148);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwLJ_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCSTab_VdwLJ_GeomW3P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 128 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 131 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*131);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwLJ_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCSTab_VdwLJ_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r01,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq01,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq01,FF),_mm256_mul_ps(vftabscale,rinv01)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r02,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq02,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq02,FF),_mm256_mul_ps(vftabscale,rinv02)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq10,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq11,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq12,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq20,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq21,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq22,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 400 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r01,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq01,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq01,FF),_mm256_mul_ps(vftabscale,rinv01)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r02,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq02,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq02,FF),_mm256_mul_ps(vftabscale,rinv02)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq10,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq11,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq12,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq20,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq21,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq22,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 409 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*409);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwLJ_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCSTab_VdwLJ_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r01,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq01,FF),_mm256_mul_ps(vftabscale,rinv01)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r02,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq02,FF),_mm256_mul_ps(vftabscale,rinv02)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 359 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+             */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r01,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq01,FF),_mm256_mul_ps(vftabscale,rinv01)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r02,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq02,FF),_mm256_mul_ps(vftabscale,rinv02)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 368 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*368);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwLJ_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCSTab_VdwLJ_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+     */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq10,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq20,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r30,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq30,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq30,FF),_mm256_mul_ps(vftabscale,rinv30)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 164 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq10,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq20,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r30,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq30,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq30,FF),_mm256_mul_ps(vftabscale,rinv30)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 167 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*167);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwLJ_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCSTab_VdwLJ_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r30,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq30,FF),_mm256_mul_ps(vftabscale,rinv30)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 147 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r30,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq30,FF),_mm256_mul_ps(vftabscale,rinv30)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 150 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*150);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwLJ_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCSTab_VdwLJ_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq11,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq12,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r13,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq13,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq13,FF),_mm256_mul_ps(vftabscale,rinv13)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq21,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq22,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r23,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq23,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq23,FF),_mm256_mul_ps(vftabscale,rinv23)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r31,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq31,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq31,FF),_mm256_mul_ps(vftabscale,rinv31)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r32,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq32,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq32,FF),_mm256_mul_ps(vftabscale,rinv32)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r33,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq33,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq33,FF),_mm256_mul_ps(vftabscale,rinv33)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 422 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq11,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq12,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r13,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq13,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq13,FF),_mm256_mul_ps(vftabscale,rinv13)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq21,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq22,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r23,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq23,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq23,FF),_mm256_mul_ps(vftabscale,rinv23)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r31,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq31,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq31,FF),_mm256_mul_ps(vftabscale,rinv31)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r32,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq32,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq32,FF),_mm256_mul_ps(vftabscale,rinv32)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r33,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq33,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq33,FF),_mm256_mul_ps(vftabscale,rinv33)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 431 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*431);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwLJ_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCSTab_VdwLJ_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r13,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq13,FF),_mm256_mul_ps(vftabscale,rinv13)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r23,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq23,FF),_mm256_mul_ps(vftabscale,rinv23)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r31,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq31,FF),_mm256_mul_ps(vftabscale,rinv31)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r32,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq32,FF),_mm256_mul_ps(vftabscale,rinv32)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r33,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq33,FF),_mm256_mul_ps(vftabscale,rinv33)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 381 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r13,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq13,FF),_mm256_mul_ps(vftabscale,rinv13)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r23,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq23,FF),_mm256_mul_ps(vftabscale,rinv23)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r31,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq31,FF),_mm256_mul_ps(vftabscale,rinv31)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r32,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq32,FF),_mm256_mul_ps(vftabscale,rinv32)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r33,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq33,FF),_mm256_mul_ps(vftabscale,rinv33)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 390 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*390);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwNone_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: None
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ *
+ * Note: this is generated code - fix defects in the kernel generator
+ * rather than hand-editing this file.
+ */
+void
+nb_kernel_ElecCSTab_VdwNone_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ /* (Some declarations above are unused in this kernel flavor; harmless generated boilerplate.) */
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ /* Zero the scratch buffer that absorbs forces written for padding (dummy) j entries */
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop. Padding entries at the end of a list have
+ * negative jjnr, so jjnr[jidx+7]>=0 guarantees all eight SIMD lanes hold
+ * real atoms; a final partial octet is handled by the masked epilogue below.
+ */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ /* Multiply index by four: each table point stores the four floats Y,F,G,H */
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ /* Horner-style evaluation: VV = Y + eps*(F + eps*(G + eps*H)) and its
+ * derivative FF = F + eps*(2G + 3H*eps), written in factored form below.
+ */
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ /* felec = -qq*FF*tabscale/r; the xor with signbit negates the product */
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 43 flops */
+ }
+
+ /* Masked epilogue for a final, partially filled octet of j atoms */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Clamp dummy indices to 0 so the gather loads below stay in bounds */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ /* Zero r in dummy lanes so the table lookup stays in range */
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ /* Multiply index by four: each table point stores the four floats Y,F,G,H */
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ /* felec = -qq*FF*tabscale/r; the xor with signbit negates the product */
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ /* Route forces of dummy lanes to the scratch sink instead of real atoms */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 44 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 8 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VF,outeriter*8 + inneriter*44);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwNone_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: None
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ *
+ * Force-only variant of the VF kernel above: identical structure, but the
+ * potential (VV/velecsum) is not evaluated or accumulated.
+ * Note: this is generated code - fix defects in the kernel generator
+ * rather than hand-editing this file.
+ */
+void
+nb_kernel_ElecCSTab_VdwNone_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ /* (Several declarations above - e.g. VV, velecsum, one, two - are unused in
+ * this force-only flavor; harmless generated boilerplate.) */
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ /* Zero the scratch buffer that absorbs forces written for padding (dummy) j entries */
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+
+ /* Start inner kernel loop. Padding entries at the end of a list have
+ * negative jjnr, so jjnr[jidx+7]>=0 guarantees all eight SIMD lanes hold
+ * real atoms; a final partial octet is handled by the masked epilogue below.
+ */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ /* Multiply index by four: each table point stores the four floats Y,F,G,H */
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ /* Spline derivative FF = F + eps*(2G + 3H*eps), in factored form;
+ * the potential itself is not needed in this force-only kernel. */
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ /* felec = -qq*FF*tabscale/r; the xor with signbit negates the product */
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 39 flops */
+ }
+
+ /* Masked epilogue for a final, partially filled octet of j atoms */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Clamp dummy indices to 0 so the gather loads below stay in bounds */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ /* Zero r in dummy lanes so the table lookup stays in range */
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ /* Multiply index by four: each table point stores the four floats Y,F,G,H */
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ /* felec = -qq*FF*tabscale/r; the xor with signbit negates the product */
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ /* Route forces of dummy lanes to the scratch sink instead of real atoms */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 40 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_F,outeriter*7 + inneriter*40);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwNone_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: None
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCSTab_VdwNone_GeomW3P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq10,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq20,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 132 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq10,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq20,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 135 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_VF,outeriter*19 + inneriter*135);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwNone_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: None
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCSTab_VdwNone_GeomW3P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 120 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 123 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_F,outeriter*18 + inneriter*123);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwNone_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: None
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCSTab_VdwNone_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r01,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq01,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq01,FF),_mm256_mul_ps(vftabscale,rinv01)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r02,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq02,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq02,FF),_mm256_mul_ps(vftabscale,rinv02)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq10,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq11,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq12,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq20,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq21,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq22,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 387 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r01,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq01,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq01,FF),_mm256_mul_ps(vftabscale,rinv01)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r02,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq02,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq02,FF),_mm256_mul_ps(vftabscale,rinv02)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq10,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq11,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq12,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq20,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq21,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq22,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 396 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_VF,outeriter*19 + inneriter*396);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwNone_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: None
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCSTab_VdwNone_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r01,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq01,FF),_mm256_mul_ps(vftabscale,rinv01)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r02,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq02,FF),_mm256_mul_ps(vftabscale,rinv02)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 351 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq00,FF),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r01,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq01,FF),_mm256_mul_ps(vftabscale,rinv01)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r02,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq02,FF),_mm256_mul_ps(vftabscale,rinv02)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 360 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_F,outeriter*18 + inneriter*360);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwNone_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: None
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCSTab_VdwNone_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq10,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq20,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r30,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq30,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq30,FF),_mm256_mul_ps(vftabscale,rinv30)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 132 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq10,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq20,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r30,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq30,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq30,FF),_mm256_mul_ps(vftabscale,rinv30)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 135 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_VF,outeriter*19 + inneriter*135);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwNone_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: None
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCSTab_VdwNone_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r30,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq30,FF),_mm256_mul_ps(vftabscale,rinv30)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 120 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r10,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq10,FF),_mm256_mul_ps(vftabscale,rinv10)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r20,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq20,FF),_mm256_mul_ps(vftabscale,rinv20)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r30,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq30,FF),_mm256_mul_ps(vftabscale,rinv30)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 123 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_F,outeriter*18 + inneriter*123);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwNone_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: None
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCSTab_VdwNone_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq11,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq12,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r13,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq13,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq13,FF),_mm256_mul_ps(vftabscale,rinv13)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq21,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq22,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r23,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq23,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq23,FF),_mm256_mul_ps(vftabscale,rinv23)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r31,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq31,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq31,FF),_mm256_mul_ps(vftabscale,rinv31)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r32,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq32,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq32,FF),_mm256_mul_ps(vftabscale,rinv32)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r33,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq33,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq33,FF),_mm256_mul_ps(vftabscale,rinv33)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 387 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq11,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq12,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r13,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq13,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq13,FF),_mm256_mul_ps(vftabscale,rinv13)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq21,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq22,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r23,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq23,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq23,FF),_mm256_mul_ps(vftabscale,rinv23)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r31,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq31,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq31,FF),_mm256_mul_ps(vftabscale,rinv31)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r32,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq32,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq32,FF),_mm256_mul_ps(vftabscale,rinv32)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r33,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq33,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq33,FF),_mm256_mul_ps(vftabscale,rinv33)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 396 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*396);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwNone_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: CubicSplineTable
+ * VdW interaction: None
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCSTab_VdwNone_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r13,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq13,FF),_mm256_mul_ps(vftabscale,rinv13)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r23,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq23,FF),_mm256_mul_ps(vftabscale,rinv23)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r31,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq31,FF),_mm256_mul_ps(vftabscale,rinv31)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r32,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq32,FF),_mm256_mul_ps(vftabscale,rinv32)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r33,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq33,FF),_mm256_mul_ps(vftabscale,rinv33)));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 351 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r11,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq11,FF),_mm256_mul_ps(vftabscale,rinv11)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r12,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq12,FF),_mm256_mul_ps(vftabscale,rinv12)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r13,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq13,FF),_mm256_mul_ps(vftabscale,rinv13)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r21,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq21,FF),_mm256_mul_ps(vftabscale,rinv21)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r22,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq22,FF),_mm256_mul_ps(vftabscale,rinv22)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r23,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq23,FF),_mm256_mul_ps(vftabscale,rinv23)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r31,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq31,FF),_mm256_mul_ps(vftabscale,rinv31)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r32,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq32,FF),_mm256_mul_ps(vftabscale,rinv32)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r33,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq33,FF),_mm256_mul_ps(vftabscale,rinv33)));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 360 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*360);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: CubicSplineTable
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 62 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 63 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 9 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*63);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: CubicSplineTable
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* scratch receives the forces of padded (dummy) j entries in the masked tail loop below */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Start inner kernel loop */
+ /* This loop processes full groups of 8 real j atoms; the remainder (padded with
+ * negative indices) is handled by the masked epilogue loop further down. */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+ /* Each table point stores 8 floats - Y,F,G,H for dispersion followed by Y,F,G,H
+ * for repulsion (selected via the +4 offset below) - hence the shift by 3 (index*8). */
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ /* FF is dV/dr in table units: rescale by tabscale, multiply by 1/r so the result
+ * can be applied directly to the dx/dy/dz components, and negate via sign-bit flip. */
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 53 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ /* Zero r for dummy entries so the table index computed below stays in range. */
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ /* Route forces of dummy entries to the local scratch buffer rather than real atom storage */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 54 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*54);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwCSTab_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCoul_VdwCSTab_GeomW3P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 119 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 120 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*120);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwCSTab_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ *
+ * Force-only variant: for the tabulated VdW terms only the spline
+ * derivative FF is evaluated (no VV), and no potential-energy sums
+ * (velecsum/vvdwsum) are accumulated.
+ */
+void
+nb_kernel_ElecCoul_VdwCSTab_GeomW3P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* scratch receives the j forces of masked (dummy) entries in the
+ * epilogue loop, so the real force array is never written for them. */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ /* Runs while a full octet of real j atoms remains; list padding
+ * entries are negative, which terminates this loop and leaves the
+ * remainder to the masked epilogue below. */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ /* Force-only kernel: only the derivative FF = Fp + eps*(G + 2*Heps)
+ * is formed from the Y/F/G/H table entries; the value VV is skipped. */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 108 flops */
+ }
+
+ /* Masked epilogue: handles the final, partially-filled octet of j atoms */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ /* Clear r for dummy lanes so the derived table index is 0 and stays in range */
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /* Dummy lanes dump their forces into scratch instead of the real force array */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 109 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*109);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,rinv01);
+ felec = _mm256_mul_ps(velec,rinvsq01);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,rinv02);
+ felec = _mm256_mul_ps(velec,rinvsq02);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 278 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+              * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,rinv01);
+ felec = _mm256_mul_ps(velec,rinvsq01);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,rinv02);
+ felec = _mm256_mul_ps(velec,rinvsq02);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 279 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*279);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,rinv01);
+ felec = _mm256_mul_ps(velec,rinvsq01);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,rinv02);
+ felec = _mm256_mul_ps(velec,rinvsq02);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 261 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,rinv01);
+ felec = _mm256_mul_ps(velec,rinvsq01);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,rinv02);
+ felec = _mm256_mul_ps(velec,rinvsq02);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 262 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*262);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwCSTab_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCoul_VdwCSTab_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* NOTE: generated file - do not hand-edit; change the kernel generator instead. */
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* Dump target for forces of padded (dummy) j entries in the masked epilogue,
+ * so no real force memory is touched for non-existing atoms. */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ /* Clear the dummy-force scratch area once up front */
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop.
+ * This first loop handles complete octets of j particles; any remainder
+ * (signalled by negative jjnr padding entries) is done by the masked
+ * epilogue below. */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ /* multiply by 8 (stride: 4 dispersion + 4 repulsion entries per table point) */
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ /* xor with signbit flips the sign: f = -(FF6+FF12)*tabscale/r */
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,rinv30);
+ felec = _mm256_mul_ps(velec,rinvsq30);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 140 flops */
+ }
+
+ /* Masked epilogue: handle the final partial octet (fewer than 8 real
+ * j particles); padded lanes are neutralized with dummy_mask. */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ /* Zero r in dummy lanes so the table index below stays in range */
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,rinv30);
+ felec = _mm256_mul_ps(velec,rinvsq30);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /* Dummy lanes write their (zeroed) forces to scratch, not to f */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 141 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*141);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwCSTab_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCoul_VdwCSTab_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,rinv30);
+ felec = _mm256_mul_ps(velec,rinvsq30);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 129 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,rinv30);
+ felec = _mm256_mul_ps(velec,rinvsq30);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 130 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*130);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,rinv13);
+ felec = _mm256_mul_ps(velec,rinvsq13);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,rinv23);
+ felec = _mm256_mul_ps(velec,rinvsq23);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,rinv31);
+ felec = _mm256_mul_ps(velec,rinvsq31);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,rinv32);
+ felec = _mm256_mul_ps(velec,rinvsq32);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,rinv33);
+ felec = _mm256_mul_ps(velec,rinvsq33);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 302 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,rinv13);
+ felec = _mm256_mul_ps(velec,rinvsq13);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,rinv23);
+ felec = _mm256_mul_ps(velec,rinvsq23);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,rinv31);
+ felec = _mm256_mul_ps(velec,rinvsq31);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,rinv32);
+ felec = _mm256_mul_ps(velec,rinvsq32);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,rinv33);
+ felec = _mm256_mul_ps(velec,rinvsq33);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 303 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*303);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,rinv13);
+ felec = _mm256_mul_ps(velec,rinvsq13);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,rinv23);
+ felec = _mm256_mul_ps(velec,rinvsq23);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,rinv31);
+ felec = _mm256_mul_ps(velec,rinvsq31);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,rinv32);
+ felec = _mm256_mul_ps(velec,rinvsq32);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,rinv33);
+ felec = _mm256_mul_ps(velec,rinvsq33);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 285 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,rinv13);
+ felec = _mm256_mul_ps(velec,rinvsq13);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,rinv23);
+ felec = _mm256_mul_ps(velec,rinvsq23);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,rinv31);
+ felec = _mm256_mul_ps(velec,rinvsq31);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,rinv32);
+ felec = _mm256_mul_ps(velec,rinvsq32);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,rinv33);
+ felec = _mm256_mul_ps(velec,rinvsq33);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 286 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*286);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwLJ_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCoul_VdwLJ_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 39 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 39 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 9 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*39);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwLJ_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCoul_VdwLJ_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 33 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 33 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*33);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwLJ_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCoul_VdwLJ_GeomW3P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 96 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 96 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*96);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwLJ_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ *
+ * Note: generated code. This kernel computes forces only (no potential-energy
+ * accumulation); atomic forces go into ff and shift forces into fr->fshift.
+ */
+void
+nb_kernel_ElecCoul_VdwLJ_GeomW3P1_F_avx_256_single
+                    (t_nblist                    * gmx_restrict       nlist,
+                     rvec                        * gmx_restrict          xx,
+                     rvec                        * gmx_restrict          ff,
+                     t_forcerec                  * gmx_restrict          fr,
+                     t_mdatoms                   * gmx_restrict     mdatoms,
+                     nb_kernel_data_t            * gmx_restrict kernel_data,
+                     t_nrnb                      * gmx_restrict        nrnb)
+{
+    /* Suffixes 0,1,2 refer to particle indices for the three atoms of the water in
+     * the outer loop, or just 0 for the single non-water particle in the inner loop.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, i.e. the eight
+     * different jnr indices whose data fill the eight positions of a 256-bit SIMD register.
+     */
+    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
+    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+    int              jnrA,jnrB,jnrC,jnrD;
+    int              jnrE,jnrF,jnrG,jnrH;
+    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
+    real             rcutoff_scalar;
+    real             *shiftvec,*fshift,*x,*f;
+    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+    real             scratch[4*DIM];
+    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+    real *           vdwioffsetptr0;
+    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+    real *           vdwioffsetptr1;
+    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+    real *           vdwioffsetptr2;
+    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+    __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+    __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
+    real             *charge;
+    int              nvdwtype;
+    __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+    int              *vdwtype;
+    real             *vdwparam;
+    __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
+    __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
+    __m256           dummy_mask,cutoff_mask;
+    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+    __m256           one     = _mm256_set1_ps(1.0);
+    __m256           two     = _mm256_set1_ps(2.0);
+    x                = xx[0];
+    f                = ff[0];
+
+    nri              = nlist->nri;
+    iinr             = nlist->iinr;
+    jindex           = nlist->jindex;
+    jjnr             = nlist->jjnr;
+    shiftidx         = nlist->shift;
+    gid              = nlist->gid;
+    shiftvec         = fr->shift_vec[0];
+    fshift           = fr->fshift[0];
+    facel            = _mm256_set1_ps(fr->epsfac);
+    charge           = mdatoms->chargeA;
+    nvdwtype         = fr->ntype;
+    vdwparam         = fr->nbfp;
+    vdwtype          = mdatoms->typeA;
+
+    /* Setup water-specific parameters: i-atom charges (pre-scaled by epsfac) and the
+     * VdW table row for atom 0, taken from the first i water in the list. */
+    inr              = nlist->iinr[0];
+    iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+    /* Initialize to silence compiler warnings about possibly-uninitialized use */
+    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+    j_coord_offsetA = 0;
+    j_coord_offsetB = 0;
+    j_coord_offsetC = 0;
+    j_coord_offsetD = 0;
+    j_coord_offsetE = 0;
+    j_coord_offsetF = 0;
+    j_coord_offsetG = 0;
+    j_coord_offsetH = 0;
+
+    outeriter        = 0;
+    inneriter        = 0;
+
+    for(iidx=0;iidx<4*DIM;iidx++)
+    {
+        scratch[iidx] = 0.0;
+    }
+
+    /* Start outer loop over neighborlists */
+    for(iidx=0; iidx<nri; iidx++)
+    {
+        /* Load shift vector for this list */
+        i_shift_offset   = DIM*shiftidx[iidx];
+
+        /* Load limits for loop over neighbors */
+        j_index_start    = jindex[iidx];
+        j_index_end      = jindex[iidx+1];
+
+        /* Get outer coordinate index */
+        inr              = iinr[iidx];
+        i_coord_offset   = DIM*inr;
+
+        /* Load i particle coords and add shift vector */
+        gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+        fix0             = _mm256_setzero_ps();
+        fiy0             = _mm256_setzero_ps();
+        fiz0             = _mm256_setzero_ps();
+        fix1             = _mm256_setzero_ps();
+        fiy1             = _mm256_setzero_ps();
+        fiz1             = _mm256_setzero_ps();
+        fix2             = _mm256_setzero_ps();
+        fiy2             = _mm256_setzero_ps();
+        fiz2             = _mm256_setzero_ps();
+
+        /* Inner loop over full 8-wide chunks where every j entry is real (non-negative);
+         * a final padded chunk, if any, is handled by the masked epilogue below. */
+        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+        {
+
+            /* Get j neighbor index, and coordinate index */
+            jnrA             = jjnr[jidx];
+            jnrB             = jjnr[jidx+1];
+            jnrC             = jjnr[jidx+2];
+            jnrD             = jjnr[jidx+3];
+            jnrE             = jjnr[jidx+4];
+            jnrF             = jjnr[jidx+5];
+            jnrG             = jjnr[jidx+6];
+            jnrH             = jjnr[jidx+7];
+            j_coord_offsetA  = DIM*jnrA;
+            j_coord_offsetB  = DIM*jnrB;
+            j_coord_offsetC  = DIM*jnrC;
+            j_coord_offsetD  = DIM*jnrD;
+            j_coord_offsetE  = DIM*jnrE;
+            j_coord_offsetF  = DIM*jnrF;
+            j_coord_offsetG  = DIM*jnrG;
+            j_coord_offsetH  = DIM*jnrH;
+
+            /* load j atom coordinates */
+            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+                                                 x+j_coord_offsetC,x+j_coord_offsetD,
+                                                 x+j_coord_offsetE,x+j_coord_offsetF,
+                                                 x+j_coord_offsetG,x+j_coord_offsetH,
+                                                 &jx0,&jy0,&jz0);
+
+            /* Calculate displacement vector */
+            dx00             = _mm256_sub_ps(ix0,jx0);
+            dy00             = _mm256_sub_ps(iy0,jy0);
+            dz00             = _mm256_sub_ps(iz0,jz0);
+            dx10             = _mm256_sub_ps(ix1,jx0);
+            dy10             = _mm256_sub_ps(iy1,jy0);
+            dz10             = _mm256_sub_ps(iz1,jz0);
+            dx20             = _mm256_sub_ps(ix2,jx0);
+            dy20             = _mm256_sub_ps(iy2,jy0);
+            dz20             = _mm256_sub_ps(iz2,jz0);
+
+            /* Calculate squared distance and things based on it */
+            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
+            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
+            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
+
+            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
+            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
+            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
+
+            /* Load parameters for j particles */
+            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+                                                                 charge+jnrC+0,charge+jnrD+0,
+                                                                 charge+jnrE+0,charge+jnrF+0,
+                                                                 charge+jnrG+0,charge+jnrH+0);
+            vdwjidx0A        = 2*vdwtype[jnrA+0];
+            vdwjidx0B        = 2*vdwtype[jnrB+0];
+            vdwjidx0C        = 2*vdwtype[jnrC+0];
+            vdwjidx0D        = 2*vdwtype[jnrD+0];
+            vdwjidx0E        = 2*vdwtype[jnrE+0];
+            vdwjidx0F        = 2*vdwtype[jnrF+0];
+            vdwjidx0G        = 2*vdwtype[jnrG+0];
+            vdwjidx0H        = 2*vdwtype[jnrH+0];
+
+            fjx0             = _mm256_setzero_ps();
+            fjy0             = _mm256_setzero_ps();
+            fjz0             = _mm256_setzero_ps();
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq00             = _mm256_mul_ps(iq0,jq0);
+            gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+                                            vdwioffsetptr0+vdwjidx0B,
+                                            vdwioffsetptr0+vdwjidx0C,
+                                            vdwioffsetptr0+vdwjidx0D,
+                                            vdwioffsetptr0+vdwjidx0E,
+                                            vdwioffsetptr0+vdwjidx0F,
+                                            vdwioffsetptr0+vdwjidx0G,
+                                            vdwioffsetptr0+vdwjidx0H,
+                                            &c6_00,&c12_00);
+
+            /* COULOMB ELECTROSTATICS */
+            velec            = _mm256_mul_ps(qq00,rinv00);
+            felec            = _mm256_mul_ps(velec,rinvsq00);
+
+            /* LENNARD-JONES DISPERSION/REPULSION (force-only form) */
+
+            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+            fvdw             = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+            fscal            = _mm256_add_ps(felec,fvdw);
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx00);
+            ty               = _mm256_mul_ps(fscal,dy00);
+            tz               = _mm256_mul_ps(fscal,dz00);
+
+            /* Update vectorial force */
+            fix0             = _mm256_add_ps(fix0,tx);
+            fiy0             = _mm256_add_ps(fiy0,ty);
+            fiz0             = _mm256_add_ps(fiz0,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq10             = _mm256_mul_ps(iq1,jq0);
+
+            /* COULOMB ELECTROSTATICS */
+            velec            = _mm256_mul_ps(qq10,rinv10);
+            felec            = _mm256_mul_ps(velec,rinvsq10);
+
+            fscal            = felec;
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx10);
+            ty               = _mm256_mul_ps(fscal,dy10);
+            tz               = _mm256_mul_ps(fscal,dz10);
+
+            /* Update vectorial force */
+            fix1             = _mm256_add_ps(fix1,tx);
+            fiy1             = _mm256_add_ps(fiy1,ty);
+            fiz1             = _mm256_add_ps(fiz1,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq20             = _mm256_mul_ps(iq2,jq0);
+
+            /* COULOMB ELECTROSTATICS */
+            velec            = _mm256_mul_ps(qq20,rinv20);
+            felec            = _mm256_mul_ps(velec,rinvsq20);
+
+            fscal            = felec;
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx20);
+            ty               = _mm256_mul_ps(fscal,dy20);
+            tz               = _mm256_mul_ps(fscal,dz20);
+
+            /* Update vectorial force */
+            fix2             = _mm256_add_ps(fix2,tx);
+            fiy2             = _mm256_add_ps(fiy2,ty);
+            fiz2             = _mm256_add_ps(fiz2,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            fjptrA             = f+j_coord_offsetA;
+            fjptrB             = f+j_coord_offsetB;
+            fjptrC             = f+j_coord_offsetC;
+            fjptrD             = f+j_coord_offsetD;
+            fjptrE             = f+j_coord_offsetE;
+            fjptrF             = f+j_coord_offsetF;
+            fjptrG             = f+j_coord_offsetG;
+            fjptrH             = f+j_coord_offsetH;
+
+            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+            /* Inner loop uses 88 flops */
+        }
+
+        /* Masked epilogue: handle a final, partially-filled 8-wide chunk that was
+         * padded with negative jjnr entries. */
+        if(jidx<j_index_end)
+        {
+
+            /* Get j neighbor index, and coordinate index */
+            jnrlistA         = jjnr[jidx];
+            jnrlistB         = jjnr[jidx+1];
+            jnrlistC         = jjnr[jidx+2];
+            jnrlistD         = jjnr[jidx+3];
+            jnrlistE         = jjnr[jidx+4];
+            jnrlistF         = jjnr[jidx+5];
+            jnrlistG         = jjnr[jidx+6];
+            jnrlistH         = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+             */
+            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
+            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
+            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
+            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
+            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
+            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
+            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
+            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
+            j_coord_offsetA  = DIM*jnrA;
+            j_coord_offsetB  = DIM*jnrB;
+            j_coord_offsetC  = DIM*jnrC;
+            j_coord_offsetD  = DIM*jnrD;
+            j_coord_offsetE  = DIM*jnrE;
+            j_coord_offsetF  = DIM*jnrF;
+            j_coord_offsetG  = DIM*jnrG;
+            j_coord_offsetH  = DIM*jnrH;
+
+            /* load j atom coordinates */
+            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+                                                 x+j_coord_offsetC,x+j_coord_offsetD,
+                                                 x+j_coord_offsetE,x+j_coord_offsetF,
+                                                 x+j_coord_offsetG,x+j_coord_offsetH,
+                                                 &jx0,&jy0,&jz0);
+
+            /* Calculate displacement vector */
+            dx00             = _mm256_sub_ps(ix0,jx0);
+            dy00             = _mm256_sub_ps(iy0,jy0);
+            dz00             = _mm256_sub_ps(iz0,jz0);
+            dx10             = _mm256_sub_ps(ix1,jx0);
+            dy10             = _mm256_sub_ps(iy1,jy0);
+            dz10             = _mm256_sub_ps(iz1,jz0);
+            dx20             = _mm256_sub_ps(ix2,jx0);
+            dy20             = _mm256_sub_ps(iy2,jy0);
+            dz20             = _mm256_sub_ps(iz2,jz0);
+
+            /* Calculate squared distance and things based on it */
+            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
+            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
+            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
+
+            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
+            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
+            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
+
+            /* Load parameters for j particles */
+            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+                                                                 charge+jnrC+0,charge+jnrD+0,
+                                                                 charge+jnrE+0,charge+jnrF+0,
+                                                                 charge+jnrG+0,charge+jnrH+0);
+            vdwjidx0A        = 2*vdwtype[jnrA+0];
+            vdwjidx0B        = 2*vdwtype[jnrB+0];
+            vdwjidx0C        = 2*vdwtype[jnrC+0];
+            vdwjidx0D        = 2*vdwtype[jnrD+0];
+            vdwjidx0E        = 2*vdwtype[jnrE+0];
+            vdwjidx0F        = 2*vdwtype[jnrF+0];
+            vdwjidx0G        = 2*vdwtype[jnrG+0];
+            vdwjidx0H        = 2*vdwtype[jnrH+0];
+
+            fjx0             = _mm256_setzero_ps();
+            fjy0             = _mm256_setzero_ps();
+            fjz0             = _mm256_setzero_ps();
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq00             = _mm256_mul_ps(iq0,jq0);
+            gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+                                            vdwioffsetptr0+vdwjidx0B,
+                                            vdwioffsetptr0+vdwjidx0C,
+                                            vdwioffsetptr0+vdwjidx0D,
+                                            vdwioffsetptr0+vdwjidx0E,
+                                            vdwioffsetptr0+vdwjidx0F,
+                                            vdwioffsetptr0+vdwjidx0G,
+                                            vdwioffsetptr0+vdwjidx0H,
+                                            &c6_00,&c12_00);
+
+            /* COULOMB ELECTROSTATICS */
+            velec            = _mm256_mul_ps(qq00,rinv00);
+            felec            = _mm256_mul_ps(velec,rinvsq00);
+
+            /* LENNARD-JONES DISPERSION/REPULSION (force-only form) */
+
+            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+            fvdw             = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+            fscal            = _mm256_add_ps(felec,fvdw);
+
+            fscal            = _mm256_andnot_ps(dummy_mask,fscal);
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx00);
+            ty               = _mm256_mul_ps(fscal,dy00);
+            tz               = _mm256_mul_ps(fscal,dz00);
+
+            /* Update vectorial force */
+            fix0             = _mm256_add_ps(fix0,tx);
+            fiy0             = _mm256_add_ps(fiy0,ty);
+            fiz0             = _mm256_add_ps(fiz0,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq10             = _mm256_mul_ps(iq1,jq0);
+
+            /* COULOMB ELECTROSTATICS */
+            velec            = _mm256_mul_ps(qq10,rinv10);
+            felec            = _mm256_mul_ps(velec,rinvsq10);
+
+            fscal            = felec;
+
+            fscal            = _mm256_andnot_ps(dummy_mask,fscal);
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx10);
+            ty               = _mm256_mul_ps(fscal,dy10);
+            tz               = _mm256_mul_ps(fscal,dz10);
+
+            /* Update vectorial force */
+            fix1             = _mm256_add_ps(fix1,tx);
+            fiy1             = _mm256_add_ps(fiy1,ty);
+            fiz1             = _mm256_add_ps(fiz1,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq20             = _mm256_mul_ps(iq2,jq0);
+
+            /* COULOMB ELECTROSTATICS */
+            velec            = _mm256_mul_ps(qq20,rinv20);
+            felec            = _mm256_mul_ps(velec,rinvsq20);
+
+            fscal            = felec;
+
+            fscal            = _mm256_andnot_ps(dummy_mask,fscal);
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx20);
+            ty               = _mm256_mul_ps(fscal,dy20);
+            tz               = _mm256_mul_ps(fscal,dz20);
+
+            /* Update vectorial force */
+            fix2             = _mm256_add_ps(fix2,tx);
+            fiy2             = _mm256_add_ps(fiy2,ty);
+            fiz2             = _mm256_add_ps(fiz2,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+            /* Inner loop uses 88 flops */
+        }
+
+        /* End of innermost loop */
+
+        gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+                                                 f+i_coord_offset,fshift+i_shift_offset);
+
+        /* Increment number of inner iterations */
+        inneriter                  += j_index_end - j_index_start;
+
+        /* Outer loop uses 18 flops */
+    }
+
+    /* Increment number of outer iterations */
+    outeriter        += nri;
+
+    /* Update outer/inner flops */
+
+    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*88);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwLJ_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCoul_VdwLJ_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,rinv01);
+ felec = _mm256_mul_ps(velec,rinvsq01);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,rinv02);
+ felec = _mm256_mul_ps(velec,rinvsq02);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 255 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,rinv01);
+ felec = _mm256_mul_ps(velec,rinvsq01);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,rinv02);
+ felec = _mm256_mul_ps(velec,rinvsq02);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 255 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*255);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwLJ_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCoul_VdwLJ_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,rinv01);
+ felec = _mm256_mul_ps(velec,rinvsq01);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,rinv02);
+ felec = _mm256_mul_ps(velec,rinvsq02);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 241 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,rinv01);
+ felec = _mm256_mul_ps(velec,rinvsq01);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,rinv02);
+ felec = _mm256_mul_ps(velec,rinvsq02);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 241 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*241);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwLJ_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCoul_VdwLJ_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,rinv30);
+ felec = _mm256_mul_ps(velec,rinvsq30);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 116 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,rinv30);
+ felec = _mm256_mul_ps(velec,rinvsq30);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 116 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*116);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwLJ_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ *
+ * Force-only variant (no potential accumulation): computes plain Coulomb
+ * (q_i*q_j/r) and Lennard-Jones 12-6 forces between a 4-site water i molecule
+ * and single j particles, 8 j atoms at a time in one AVX 256-bit register.
+ * In this W4 geometry, i atom 0 carries only the LJ interaction (see
+ * vdwioffsetptr0 below) while i atoms 1-3 carry the charges (iq1..iq3).
+ * Forces are accumulated on both i and j atoms; fshift is updated for virial.
+ */
+void
+nb_kernel_ElecCoul_VdwLJ_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* Dump buffer for forces of masked-out (dummy) j atoms in the epilogue */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ /* Charges live on i atoms 1-3 (pre-scaled by epsfac); atom 0 is the LJ site. */
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ /* nbfp stores (c6,c12) pairs, hence the factor 2 per type row */
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop.
+ * Main loop: full batches of 8 real j atoms (stops when padding
+ * entries, marked by negative jjnr values, would be included).
+ */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ /* Pair 0-0 is LJ-only, so only 1/r^2 is needed (no 1/r) */
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ /* F*r = (12*c12/r^12 - 6*c6/r^6)/r ; folded as (c12/r^6 - c6)/r^8 scaling
+ * (this force-only kernel skips the 1/6, 1/12 potential prefactors).
+ */
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,rinv30);
+ felec = _mm256_mul_ps(velec,rinvsq30);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 108 flops */
+ }
+
+ /* Epilogue: handle a final partial batch (1-7 real j atoms padded with
+ * negative jjnr entries); dummy lanes are masked out of the forces.
+ */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Clamp dummy indices to 0 so loads stay in bounds */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = fvdw;
+
+ /* Zero force contributions from dummy (padding) lanes */
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,rinv30);
+ felec = _mm256_mul_ps(velec,rinvsq30);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /* Dummy lanes write their (zeroed) forces to the scratch buffer */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 108 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*108);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwLJ_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCoul_VdwLJ_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,rinv13);
+ felec = _mm256_mul_ps(velec,rinvsq13);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,rinv23);
+ felec = _mm256_mul_ps(velec,rinvsq23);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,rinv31);
+ felec = _mm256_mul_ps(velec,rinvsq31);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,rinv32);
+ felec = _mm256_mul_ps(velec,rinvsq32);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,rinv33);
+ felec = _mm256_mul_ps(velec,rinvsq33);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 278 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,rinv13);
+ felec = _mm256_mul_ps(velec,rinvsq13);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,rinv23);
+ felec = _mm256_mul_ps(velec,rinvsq23);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,rinv31);
+ felec = _mm256_mul_ps(velec,rinvsq31);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,rinv32);
+ felec = _mm256_mul_ps(velec,rinvsq32);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,rinv33);
+ felec = _mm256_mul_ps(velec,rinvsq33);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 278 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*278);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwLJ_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCoul_VdwLJ_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,rinv13);
+ felec = _mm256_mul_ps(velec,rinvsq13);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,rinv23);
+ felec = _mm256_mul_ps(velec,rinvsq23);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,rinv31);
+ felec = _mm256_mul_ps(velec,rinvsq31);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,rinv32);
+ felec = _mm256_mul_ps(velec,rinvsq32);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,rinv33);
+ felec = _mm256_mul_ps(velec,rinvsq33);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 264 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,rinv13);
+ felec = _mm256_mul_ps(velec,rinvsq13);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,rinv23);
+ felec = _mm256_mul_ps(velec,rinvsq23);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,rinv31);
+ felec = _mm256_mul_ps(velec,rinvsq31);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,rinv32);
+ felec = _mm256_mul_ps(velec,rinvsq32);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,rinv33);
+ felec = _mm256_mul_ps(velec,rinvsq33);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 264 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*264);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwNone_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: None
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCoul_VdwNone_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ /* Kernel contract: plain 1/r Coulomb electrostatics, no VdW, particle-particle
+ * geometry, accumulating both forces (into ff/fshift) and the electrostatic
+ * potential (into kernel_data->energygrp_elec). The j loop is 8-way AVX
+ * unrolled: full batches of 8 first, then one masked epilogue batch.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ /* Flatten the rvec arrays to plain real pointers for linear DIM-strided indexing */
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ /* facel folds the electrostatics prefactor into the i-particle charge once per i atom */
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ /* Zero the scratch buffer; in the masked epilogue, force writes for padding
+ * (non-real) j entries are redirected here instead of into f[].
+ */
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ /* Main loop: full batches of 8 real j atoms; a negative jjnr entry marks
+ * the start of the padded tail, which falls through to the masked block below.
+ */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ /* NOTE(review): gmx_mm256_invsqrt_ps is presumably a Newton-refined rsqrt
+ * approximation (single precision) - confirm in gmx_math_x86_avx_256_single.h.
+ */
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ /* velec = qq/r ; felec = qq/r^3 (scalar force divided by r, applied to dx/dy/dz) */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ /* Newton's third law: subtract the same force from each j atom */
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 27 flops */
+ }
+
+ /* Epilogue: final partial batch of 1-7 real j atoms, padded to 8 with
+ * negative jjnr entries; those lanes are masked out of energy and force.
+ */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Clamp padding indices to atom 0 so the loads below stay in bounds;
+ * the bogus results in those lanes are discarded via dummy_mask.
+ */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ /* Clear padding lanes before accumulating energy */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Clear padding lanes before computing forces */
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ /* Padding lanes write their (already-zeroed) forces to scratch, not f[] */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 27 flops */
+ }
+
+ /* End of innermost loop */
+
+ /* Horizontally reduce the 8 accumulated i-force lanes into f[] and fshift[] */
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 8 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VF,outeriter*8 + inneriter*27);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwNone_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: None
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ *
+ * Force-only variant: accumulates forces into ff/fshift but no potential
+ * energies (compare the _VF_ kernel, which also sums velec per energy group).
+ * Flop-count bookkeeping is reported via inc_nrnb at the end.
+ */
+void
+nb_kernel_ElecCoul_VdwNone_GeomP1P1_F_avx_256_single
+                    (t_nblist * gmx_restrict                nlist,
+                     rvec * gmx_restrict                    xx,
+                     rvec * gmx_restrict                    ff,
+                     t_forcerec * gmx_restrict              fr,
+                     t_mdatoms * gmx_restrict               mdatoms,
+                     nb_kernel_data_t * gmx_restrict        kernel_data,
+                     t_nrnb * gmx_restrict                  nrnb)
+{
+    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+     * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the four positions in the SIMD register.
+     */
+    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
+    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+    int              jnrA,jnrB,jnrC,jnrD;
+    int              jnrE,jnrF,jnrG,jnrH;
+    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
+    real             rcutoff_scalar;
+    real             *shiftvec,*fshift,*x,*f;
+    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+    /* Write-only sink for force contributions of masked-out (dummy) j entries
+     * in the epilogue loop, so no conditional stores are needed there. */
+    real             scratch[4*DIM];
+    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+    real *           vdwioffsetptr0;
+    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
+    real             *charge;
+    __m256           dummy_mask,cutoff_mask;
+    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+    __m256           one     = _mm256_set1_ps(1.0);
+    __m256           two     = _mm256_set1_ps(2.0);
+    x                = xx[0];
+    f                = ff[0];
+
+    nri              = nlist->nri;
+    iinr             = nlist->iinr;
+    jindex           = nlist->jindex;
+    jjnr             = nlist->jjnr;
+    shiftidx         = nlist->shift;
+    gid              = nlist->gid;
+    shiftvec         = fr->shift_vec[0];
+    fshift           = fr->fshift[0];
+    facel            = _mm256_set1_ps(fr->epsfac);
+    charge           = mdatoms->chargeA;
+
+    /* Avoid stupid compiler warnings */
+    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+    j_coord_offsetA = 0;
+    j_coord_offsetB = 0;
+    j_coord_offsetC = 0;
+    j_coord_offsetD = 0;
+    j_coord_offsetE = 0;
+    j_coord_offsetF = 0;
+    j_coord_offsetG = 0;
+    j_coord_offsetH = 0;
+
+    outeriter        = 0;
+    inneriter        = 0;
+
+    for(iidx=0;iidx<4*DIM;iidx++)
+    {
+        scratch[iidx] = 0.0;
+    }
+
+    /* Start outer loop over neighborlists */
+    for(iidx=0; iidx<nri; iidx++)
+    {
+        /* Load shift vector for this list */
+        i_shift_offset   = DIM*shiftidx[iidx];
+
+        /* Load limits for loop over neighbors */
+        j_index_start    = jindex[iidx];
+        j_index_end      = jindex[iidx+1];
+
+        /* Get outer coordinate index */
+        inr              = iinr[iidx];
+        i_coord_offset   = DIM*inr;
+
+        /* Load i particle coords and add shift vector */
+        gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+        fix0             = _mm256_setzero_ps();
+        fiy0             = _mm256_setzero_ps();
+        fiz0             = _mm256_setzero_ps();
+
+        /* Load parameters for i particles */
+        iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+
+        /* Start inner kernel loop.
+         * Main loop runs only over full groups of 8 real j particles;
+         * the jjnr[jidx+7]>=0 test stops before any padded (negative) entry. */
+        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+        {
+
+            /* Get j neighbor index, and coordinate index */
+            jnrA             = jjnr[jidx];
+            jnrB             = jjnr[jidx+1];
+            jnrC             = jjnr[jidx+2];
+            jnrD             = jjnr[jidx+3];
+            jnrE             = jjnr[jidx+4];
+            jnrF             = jjnr[jidx+5];
+            jnrG             = jjnr[jidx+6];
+            jnrH             = jjnr[jidx+7];
+            j_coord_offsetA  = DIM*jnrA;
+            j_coord_offsetB  = DIM*jnrB;
+            j_coord_offsetC  = DIM*jnrC;
+            j_coord_offsetD  = DIM*jnrD;
+            j_coord_offsetE  = DIM*jnrE;
+            j_coord_offsetF  = DIM*jnrF;
+            j_coord_offsetG  = DIM*jnrG;
+            j_coord_offsetH  = DIM*jnrH;
+
+            /* load j atom coordinates */
+            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+                                                 x+j_coord_offsetC,x+j_coord_offsetD,
+                                                 x+j_coord_offsetE,x+j_coord_offsetF,
+                                                 x+j_coord_offsetG,x+j_coord_offsetH,
+                                                 &jx0,&jy0,&jz0);
+
+            /* Calculate displacement vector */
+            dx00             = _mm256_sub_ps(ix0,jx0);
+            dy00             = _mm256_sub_ps(iy0,jy0);
+            dz00             = _mm256_sub_ps(iz0,jz0);
+
+            /* Calculate squared distance and things based on it */
+            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
+
+            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
+
+            /* Load parameters for j particles */
+            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+                                                                 charge+jnrC+0,charge+jnrD+0,
+                                                                 charge+jnrE+0,charge+jnrF+0,
+                                                                 charge+jnrG+0,charge+jnrH+0);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq00             = _mm256_mul_ps(iq0,jq0);
+
+            /* COULOMB ELECTROSTATICS */
+            velec            = _mm256_mul_ps(qq00,rinv00);
+            felec            = _mm256_mul_ps(velec,rinvsq00);
+
+            fscal            = felec;
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx00);
+            ty               = _mm256_mul_ps(fscal,dy00);
+            tz               = _mm256_mul_ps(fscal,dz00);
+
+            /* Update vectorial force */
+            fix0             = _mm256_add_ps(fix0,tx);
+            fiy0             = _mm256_add_ps(fiy0,ty);
+            fiz0             = _mm256_add_ps(fiz0,tz);
+
+            fjptrA             = f+j_coord_offsetA;
+            fjptrB             = f+j_coord_offsetB;
+            fjptrC             = f+j_coord_offsetC;
+            fjptrD             = f+j_coord_offsetD;
+            fjptrE             = f+j_coord_offsetE;
+            fjptrF             = f+j_coord_offsetF;
+            fjptrG             = f+j_coord_offsetG;
+            fjptrH             = f+j_coord_offsetH;
+            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+            /* Inner loop uses 26 flops */
+        }
+
+        /* Epilogue: handle the remaining 1-7 real j particles (list is padded
+         * to a multiple of 8); dummy entries are cleared with dummy_mask. */
+        if(jidx<j_index_end)
+        {
+
+            /* Get j neighbor index, and coordinate index */
+            jnrlistA         = jjnr[jidx];
+            jnrlistB         = jjnr[jidx+1];
+            jnrlistC         = jjnr[jidx+2];
+            jnrlistD         = jjnr[jidx+3];
+            jnrlistE         = jjnr[jidx+4];
+            jnrlistF         = jjnr[jidx+5];
+            jnrlistG         = jjnr[jidx+6];
+            jnrlistH         = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+             */
+            /* NOTE(review): relies on padding entries in jjnr being negative -
+             * set up by the neighborlist construction code (not visible here). */
+            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
+            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
+            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
+            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
+            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
+            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
+            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
+            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
+            j_coord_offsetA  = DIM*jnrA;
+            j_coord_offsetB  = DIM*jnrB;
+            j_coord_offsetC  = DIM*jnrC;
+            j_coord_offsetD  = DIM*jnrD;
+            j_coord_offsetE  = DIM*jnrE;
+            j_coord_offsetF  = DIM*jnrF;
+            j_coord_offsetG  = DIM*jnrG;
+            j_coord_offsetH  = DIM*jnrH;
+
+            /* load j atom coordinates */
+            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+                                                 x+j_coord_offsetC,x+j_coord_offsetD,
+                                                 x+j_coord_offsetE,x+j_coord_offsetF,
+                                                 x+j_coord_offsetG,x+j_coord_offsetH,
+                                                 &jx0,&jy0,&jz0);
+
+            /* Calculate displacement vector */
+            dx00             = _mm256_sub_ps(ix0,jx0);
+            dy00             = _mm256_sub_ps(iy0,jy0);
+            dz00             = _mm256_sub_ps(iz0,jz0);
+
+            /* Calculate squared distance and things based on it */
+            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
+
+            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
+
+            /* Load parameters for j particles */
+            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+                                                                 charge+jnrC+0,charge+jnrD+0,
+                                                                 charge+jnrE+0,charge+jnrF+0,
+                                                                 charge+jnrG+0,charge+jnrH+0);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq00             = _mm256_mul_ps(iq0,jq0);
+
+            /* COULOMB ELECTROSTATICS */
+            velec            = _mm256_mul_ps(qq00,rinv00);
+            felec            = _mm256_mul_ps(velec,rinvsq00);
+
+            fscal            = felec;
+
+            /* Zero force contributions from dummy (padding) entries */
+            fscal            = _mm256_andnot_ps(dummy_mask,fscal);
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx00);
+            ty               = _mm256_mul_ps(fscal,dy00);
+            tz               = _mm256_mul_ps(fscal,dz00);
+
+            /* Update vectorial force */
+            fix0             = _mm256_add_ps(fix0,tx);
+            fiy0             = _mm256_add_ps(fiy0,ty);
+            fiz0             = _mm256_add_ps(fiz0,tz);
+
+            /* Dummy entries write their (zeroed) forces into scratch instead of f */
+            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+            /* Inner loop uses 26 flops */
+        }
+
+        /* End of innermost loop */
+
+        gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+                                                 f+i_coord_offset,fshift+i_shift_offset);
+
+        /* Increment number of inner iterations */
+        inneriter                  += j_index_end - j_index_start;
+
+        /* Outer loop uses 7 flops */
+    }
+
+    /* Increment number of outer iterations */
+    outeriter        += nri;
+
+    /* Update outer/inner flops */
+
+    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_F,outeriter*7 + inneriter*26);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwNone_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: None
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ *
+ * Water3-Particle kernel: each outer entry is a 3-site water (atoms 0,1,2),
+ * and all three i atoms interact with every single-site j particle per pass.
+ * Accumulates both forces and the Coulomb potential (per energy group).
+ */
+void
+nb_kernel_ElecCoul_VdwNone_GeomW3P1_VF_avx_256_single
+                    (t_nblist * gmx_restrict                nlist,
+                     rvec * gmx_restrict                    xx,
+                     rvec * gmx_restrict                    ff,
+                     t_forcerec * gmx_restrict              fr,
+                     t_mdatoms * gmx_restrict               mdatoms,
+                     nb_kernel_data_t * gmx_restrict        kernel_data,
+                     t_nrnb * gmx_restrict                  nrnb)
+{
+    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+     * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the four positions in the SIMD register.
+     */
+    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
+    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+    int              jnrA,jnrB,jnrC,jnrD;
+    int              jnrE,jnrF,jnrG,jnrH;
+    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
+    real             rcutoff_scalar;
+    real             *shiftvec,*fshift,*x,*f;
+    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+    /* Write-only sink for force contributions of masked-out (dummy) j entries
+     * in the epilogue loop, so no conditional stores are needed there. */
+    real             scratch[4*DIM];
+    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+    real *           vdwioffsetptr0;
+    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+    real *           vdwioffsetptr1;
+    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+    real *           vdwioffsetptr2;
+    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+    __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+    __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
+    real             *charge;
+    __m256           dummy_mask,cutoff_mask;
+    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+    __m256           one     = _mm256_set1_ps(1.0);
+    __m256           two     = _mm256_set1_ps(2.0);
+    x                = xx[0];
+    f                = ff[0];
+
+    nri              = nlist->nri;
+    iinr             = nlist->iinr;
+    jindex           = nlist->jindex;
+    jjnr             = nlist->jjnr;
+    shiftidx         = nlist->shift;
+    gid              = nlist->gid;
+    shiftvec         = fr->shift_vec[0];
+    fshift           = fr->fshift[0];
+    facel            = _mm256_set1_ps(fr->epsfac);
+    charge           = mdatoms->chargeA;
+
+    /* Setup water-specific parameters */
+    inr              = nlist->iinr[0];
+    /* NOTE(review): i charges are loaded once from the first list entry -
+     * assumes every water in this list shares the charges of iinr[0]. */
+    iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+    /* Avoid stupid compiler warnings */
+    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+    j_coord_offsetA = 0;
+    j_coord_offsetB = 0;
+    j_coord_offsetC = 0;
+    j_coord_offsetD = 0;
+    j_coord_offsetE = 0;
+    j_coord_offsetF = 0;
+    j_coord_offsetG = 0;
+    j_coord_offsetH = 0;
+
+    outeriter        = 0;
+    inneriter        = 0;
+
+    for(iidx=0;iidx<4*DIM;iidx++)
+    {
+        scratch[iidx] = 0.0;
+    }
+
+    /* Start outer loop over neighborlists */
+    for(iidx=0; iidx<nri; iidx++)
+    {
+        /* Load shift vector for this list */
+        i_shift_offset   = DIM*shiftidx[iidx];
+
+        /* Load limits for loop over neighbors */
+        j_index_start    = jindex[iidx];
+        j_index_end      = jindex[iidx+1];
+
+        /* Get outer coordinate index */
+        inr              = iinr[iidx];
+        i_coord_offset   = DIM*inr;
+
+        /* Load i particle coords and add shift vector */
+        gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+        fix0             = _mm256_setzero_ps();
+        fiy0             = _mm256_setzero_ps();
+        fiz0             = _mm256_setzero_ps();
+        fix1             = _mm256_setzero_ps();
+        fiy1             = _mm256_setzero_ps();
+        fiz1             = _mm256_setzero_ps();
+        fix2             = _mm256_setzero_ps();
+        fiy2             = _mm256_setzero_ps();
+        fiz2             = _mm256_setzero_ps();
+
+        /* Reset potential sums */
+        velecsum         = _mm256_setzero_ps();
+
+        /* Start inner kernel loop.
+         * Main loop runs only over full groups of 8 real j particles;
+         * the jjnr[jidx+7]>=0 test stops before any padded (negative) entry. */
+        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+        {
+
+            /* Get j neighbor index, and coordinate index */
+            jnrA             = jjnr[jidx];
+            jnrB             = jjnr[jidx+1];
+            jnrC             = jjnr[jidx+2];
+            jnrD             = jjnr[jidx+3];
+            jnrE             = jjnr[jidx+4];
+            jnrF             = jjnr[jidx+5];
+            jnrG             = jjnr[jidx+6];
+            jnrH             = jjnr[jidx+7];
+            j_coord_offsetA  = DIM*jnrA;
+            j_coord_offsetB  = DIM*jnrB;
+            j_coord_offsetC  = DIM*jnrC;
+            j_coord_offsetD  = DIM*jnrD;
+            j_coord_offsetE  = DIM*jnrE;
+            j_coord_offsetF  = DIM*jnrF;
+            j_coord_offsetG  = DIM*jnrG;
+            j_coord_offsetH  = DIM*jnrH;
+
+            /* load j atom coordinates */
+            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+                                                 x+j_coord_offsetC,x+j_coord_offsetD,
+                                                 x+j_coord_offsetE,x+j_coord_offsetF,
+                                                 x+j_coord_offsetG,x+j_coord_offsetH,
+                                                 &jx0,&jy0,&jz0);
+
+            /* Calculate displacement vector */
+            dx00             = _mm256_sub_ps(ix0,jx0);
+            dy00             = _mm256_sub_ps(iy0,jy0);
+            dz00             = _mm256_sub_ps(iz0,jz0);
+            dx10             = _mm256_sub_ps(ix1,jx0);
+            dy10             = _mm256_sub_ps(iy1,jy0);
+            dz10             = _mm256_sub_ps(iz1,jz0);
+            dx20             = _mm256_sub_ps(ix2,jx0);
+            dy20             = _mm256_sub_ps(iy2,jy0);
+            dz20             = _mm256_sub_ps(iz2,jz0);
+
+            /* Calculate squared distance and things based on it */
+            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
+            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
+            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
+
+            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
+            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
+            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
+
+            /* Load parameters for j particles */
+            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+                                                                 charge+jnrC+0,charge+jnrD+0,
+                                                                 charge+jnrE+0,charge+jnrF+0,
+                                                                 charge+jnrG+0,charge+jnrH+0);
+
+            /* j forces are accumulated across all three i-atom interactions
+             * and written back once at the end of the iteration. */
+            fjx0             = _mm256_setzero_ps();
+            fjy0             = _mm256_setzero_ps();
+            fjz0             = _mm256_setzero_ps();
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq00             = _mm256_mul_ps(iq0,jq0);
+
+            /* COULOMB ELECTROSTATICS */
+            velec            = _mm256_mul_ps(qq00,rinv00);
+            felec            = _mm256_mul_ps(velec,rinvsq00);
+
+            /* Update potential sum for this i atom from the interaction with this j atom. */
+            velecsum         = _mm256_add_ps(velecsum,velec);
+
+            fscal            = felec;
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx00);
+            ty               = _mm256_mul_ps(fscal,dy00);
+            tz               = _mm256_mul_ps(fscal,dz00);
+
+            /* Update vectorial force */
+            fix0             = _mm256_add_ps(fix0,tx);
+            fiy0             = _mm256_add_ps(fiy0,ty);
+            fiz0             = _mm256_add_ps(fiz0,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq10             = _mm256_mul_ps(iq1,jq0);
+
+            /* COULOMB ELECTROSTATICS */
+            velec            = _mm256_mul_ps(qq10,rinv10);
+            felec            = _mm256_mul_ps(velec,rinvsq10);
+
+            /* Update potential sum for this i atom from the interaction with this j atom. */
+            velecsum         = _mm256_add_ps(velecsum,velec);
+
+            fscal            = felec;
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx10);
+            ty               = _mm256_mul_ps(fscal,dy10);
+            tz               = _mm256_mul_ps(fscal,dz10);
+
+            /* Update vectorial force */
+            fix1             = _mm256_add_ps(fix1,tx);
+            fiy1             = _mm256_add_ps(fiy1,ty);
+            fiz1             = _mm256_add_ps(fiz1,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq20             = _mm256_mul_ps(iq2,jq0);
+
+            /* COULOMB ELECTROSTATICS */
+            velec            = _mm256_mul_ps(qq20,rinv20);
+            felec            = _mm256_mul_ps(velec,rinvsq20);
+
+            /* Update potential sum for this i atom from the interaction with this j atom. */
+            velecsum         = _mm256_add_ps(velecsum,velec);
+
+            fscal            = felec;
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx20);
+            ty               = _mm256_mul_ps(fscal,dy20);
+            tz               = _mm256_mul_ps(fscal,dz20);
+
+            /* Update vectorial force */
+            fix2             = _mm256_add_ps(fix2,tx);
+            fiy2             = _mm256_add_ps(fiy2,ty);
+            fiz2             = _mm256_add_ps(fiz2,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            fjptrA             = f+j_coord_offsetA;
+            fjptrB             = f+j_coord_offsetB;
+            fjptrC             = f+j_coord_offsetC;
+            fjptrD             = f+j_coord_offsetD;
+            fjptrE             = f+j_coord_offsetE;
+            fjptrF             = f+j_coord_offsetF;
+            fjptrG             = f+j_coord_offsetG;
+            fjptrH             = f+j_coord_offsetH;
+
+            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+            /* Inner loop uses 84 flops */
+        }
+
+        /* Epilogue: handle the remaining 1-7 real j particles (list is padded
+         * to a multiple of 8); dummy entries are cleared with dummy_mask. */
+        if(jidx<j_index_end)
+        {
+
+            /* Get j neighbor index, and coordinate index */
+            jnrlistA         = jjnr[jidx];
+            jnrlistB         = jjnr[jidx+1];
+            jnrlistC         = jjnr[jidx+2];
+            jnrlistD         = jjnr[jidx+3];
+            jnrlistE         = jjnr[jidx+4];
+            jnrlistF         = jjnr[jidx+5];
+            jnrlistG         = jjnr[jidx+6];
+            jnrlistH         = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+             */
+            /* NOTE(review): relies on padding entries in jjnr being negative -
+             * set up by the neighborlist construction code (not visible here). */
+            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
+            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
+            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
+            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
+            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
+            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
+            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
+            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
+            j_coord_offsetA  = DIM*jnrA;
+            j_coord_offsetB  = DIM*jnrB;
+            j_coord_offsetC  = DIM*jnrC;
+            j_coord_offsetD  = DIM*jnrD;
+            j_coord_offsetE  = DIM*jnrE;
+            j_coord_offsetF  = DIM*jnrF;
+            j_coord_offsetG  = DIM*jnrG;
+            j_coord_offsetH  = DIM*jnrH;
+
+            /* load j atom coordinates */
+            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+                                                 x+j_coord_offsetC,x+j_coord_offsetD,
+                                                 x+j_coord_offsetE,x+j_coord_offsetF,
+                                                 x+j_coord_offsetG,x+j_coord_offsetH,
+                                                 &jx0,&jy0,&jz0);
+
+            /* Calculate displacement vector */
+            dx00             = _mm256_sub_ps(ix0,jx0);
+            dy00             = _mm256_sub_ps(iy0,jy0);
+            dz00             = _mm256_sub_ps(iz0,jz0);
+            dx10             = _mm256_sub_ps(ix1,jx0);
+            dy10             = _mm256_sub_ps(iy1,jy0);
+            dz10             = _mm256_sub_ps(iz1,jz0);
+            dx20             = _mm256_sub_ps(ix2,jx0);
+            dy20             = _mm256_sub_ps(iy2,jy0);
+            dz20             = _mm256_sub_ps(iz2,jz0);
+
+            /* Calculate squared distance and things based on it */
+            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
+            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
+            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
+
+            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
+            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
+            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
+
+            /* Load parameters for j particles */
+            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+                                                                 charge+jnrC+0,charge+jnrD+0,
+                                                                 charge+jnrE+0,charge+jnrF+0,
+                                                                 charge+jnrG+0,charge+jnrH+0);
+
+            /* j forces are accumulated across all three i-atom interactions
+             * and written back once at the end of the iteration. */
+            fjx0             = _mm256_setzero_ps();
+            fjy0             = _mm256_setzero_ps();
+            fjz0             = _mm256_setzero_ps();
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq00             = _mm256_mul_ps(iq0,jq0);
+
+            /* COULOMB ELECTROSTATICS */
+            velec            = _mm256_mul_ps(qq00,rinv00);
+            felec            = _mm256_mul_ps(velec,rinvsq00);
+
+            /* Update potential sum for this i atom from the interaction with this j atom. */
+            velec            = _mm256_andnot_ps(dummy_mask,velec);
+            velecsum         = _mm256_add_ps(velecsum,velec);
+
+            fscal            = felec;
+
+            /* Zero force contributions from dummy (padding) entries */
+            fscal            = _mm256_andnot_ps(dummy_mask,fscal);
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx00);
+            ty               = _mm256_mul_ps(fscal,dy00);
+            tz               = _mm256_mul_ps(fscal,dz00);
+
+            /* Update vectorial force */
+            fix0             = _mm256_add_ps(fix0,tx);
+            fiy0             = _mm256_add_ps(fiy0,ty);
+            fiz0             = _mm256_add_ps(fiz0,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq10             = _mm256_mul_ps(iq1,jq0);
+
+            /* COULOMB ELECTROSTATICS */
+            velec            = _mm256_mul_ps(qq10,rinv10);
+            felec            = _mm256_mul_ps(velec,rinvsq10);
+
+            /* Update potential sum for this i atom from the interaction with this j atom. */
+            velec            = _mm256_andnot_ps(dummy_mask,velec);
+            velecsum         = _mm256_add_ps(velecsum,velec);
+
+            fscal            = felec;
+
+            /* Zero force contributions from dummy (padding) entries */
+            fscal            = _mm256_andnot_ps(dummy_mask,fscal);
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx10);
+            ty               = _mm256_mul_ps(fscal,dy10);
+            tz               = _mm256_mul_ps(fscal,dz10);
+
+            /* Update vectorial force */
+            fix1             = _mm256_add_ps(fix1,tx);
+            fiy1             = _mm256_add_ps(fiy1,ty);
+            fiz1             = _mm256_add_ps(fiz1,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq20             = _mm256_mul_ps(iq2,jq0);
+
+            /* COULOMB ELECTROSTATICS */
+            velec            = _mm256_mul_ps(qq20,rinv20);
+            felec            = _mm256_mul_ps(velec,rinvsq20);
+
+            /* Update potential sum for this i atom from the interaction with this j atom. */
+            velec            = _mm256_andnot_ps(dummy_mask,velec);
+            velecsum         = _mm256_add_ps(velecsum,velec);
+
+            fscal            = felec;
+
+            /* Zero force contributions from dummy (padding) entries */
+            fscal            = _mm256_andnot_ps(dummy_mask,fscal);
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx20);
+            ty               = _mm256_mul_ps(fscal,dy20);
+            tz               = _mm256_mul_ps(fscal,dz20);
+
+            /* Update vectorial force */
+            fix2             = _mm256_add_ps(fix2,tx);
+            fiy2             = _mm256_add_ps(fiy2,ty);
+            fiz2             = _mm256_add_ps(fiz2,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /* Dummy entries write their (zeroed) forces into scratch instead of f */
+            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+            /* Inner loop uses 84 flops */
+        }
+
+        /* End of innermost loop */
+
+        gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+                                                 f+i_coord_offset,fshift+i_shift_offset);
+
+        ggid                        = gid[iidx];
+        /* Update potential energies */
+        gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+        /* Increment number of inner iterations */
+        inneriter                  += j_index_end - j_index_start;
+
+        /* Outer loop uses 19 flops */
+    }
+
+    /* Increment number of outer iterations */
+    outeriter        += nri;
+
+    /* Update outer/inner flops */
+
+    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_VF,outeriter*19 + inneriter*84);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwNone_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: None
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCoul_VdwNone_GeomW3P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 81 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 81 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_F,outeriter*18 + inneriter*81);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwNone_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: None
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCoul_VdwNone_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,rinv01);
+ felec = _mm256_mul_ps(velec,rinvsq01);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,rinv02);
+ felec = _mm256_mul_ps(velec,rinvsq02);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 243 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,rinv01);
+ felec = _mm256_mul_ps(velec,rinvsq01);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,rinv02);
+ felec = _mm256_mul_ps(velec,rinvsq02);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 243 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_VF,outeriter*19 + inneriter*243);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwNone_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: None
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCoul_VdwNone_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* NOTE(review): this file is produced by the GROMACS avx_256_single kernel
+ * generator. Do not hand-edit the generated logic; fix the generator instead.
+ * This variant computes forces only (no potential accumulation) for plain
+ * Coulomb electrostatics between two rigid 3-site waters (9 site pairs).
+ */
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* scratch absorbs force writes for padded (negative-index) j entries in the
+ * masked epilogue loop, so dummy lanes never touch the real force array. */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ /* NOTE(review): several generated declarations above (e.g. velecsum, signbit,
+ * one, two, rcutoff, the c6_/c12_ and vdwjidx variables) are unused in this
+ * force-only Coulomb kernel; they are emitted unconditionally by the generator. */
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ /* Full-width pass: runs while all 8 j indices in the group are real
+ * (padded neighborlists mark dummy entries with negative jnr, which
+ * terminates this loop and falls through to the masked epilogue). */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ /* velec = qq/r is only an intermediate here; the force scalar is qq/r^3. */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,rinv01);
+ felec = _mm256_mul_ps(velec,rinvsq01);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,rinv02);
+ felec = _mm256_mul_ps(velec,rinvsq02);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 234 flops */
+ }
+
+ /* Epilogue: fewer than 8 real j atoms remain in this list; the group is
+ * padded with negative indices, and dummy lanes are masked out below. */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(velec,rinvsq00);
+
+ fscal = felec;
+
+ /* Zero the force scalar in dummy lanes before it can touch accumulators. */
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,rinv01);
+ felec = _mm256_mul_ps(velec,rinvsq01);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,rinv02);
+ felec = _mm256_mul_ps(velec,rinvsq02);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /* Dummy lanes write their (already zeroed) forces to scratch, not f. */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 234 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_F,outeriter*18 + inneriter*234);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwNone_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: None
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCoul_VdwNone_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* Scratch area used as a harmless dump target for the forces of padded
+ * (dummy, jnr<0) neighbor-list entries in the masked epilogue iteration.
+ */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ /* Setup water-specific parameters */
+ /* Only atoms 1,2,3 of the water4 carry charge in this Coulomb-only kernel;
+ * atom 0 (the vdW-only site) is skipped throughout.
+ */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ /* The +DIM offset skips atom 0 so registers 1..3 hold the charged sites. */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ /* Full 8-wide batches only; the loop stops as soon as the batch would
+ * contain a padded (negative) jnr, which the masked epilogue handles.
+ */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ /* V = q_i q_j / r, F_scalar = V / r^2 */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,rinv30);
+ felec = _mm256_mul_ps(velec,rinvsq30);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 84 flops */
+ }
+
+ /* Epilogue: handle the final, possibly partial batch of up to 8 j atoms.
+ * Dummy (negative) list entries are masked out of energies and forces.
+ */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Clamp dummy indices to 0 so the coordinate/charge loads stay in bounds. */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ /* Zero the energy contribution of dummy entries before accumulating. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,rinv30);
+ felec = _mm256_mul_ps(velec,rinvsq30);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /* Real atoms scatter forces into f; dummy entries dump into scratch. */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 84 flops */
+ }
+
+ /* End of innermost loop */
+
+ /* Reduce and store i forces; the +DIM offset again skips atom 0. */
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_VF,outeriter*19 + inneriter*84);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwNone_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: None
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCoul_VdwNone_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Force-only variant of the W4P1 Coulomb kernel: identical interaction math
+ * as the VF kernel above, but no potential-energy accumulation or update.
+ */
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* Scratch area used as a harmless dump target for the forces of padded
+ * (dummy, jnr<0) neighbor-list entries in the masked epilogue iteration.
+ */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ /* Setup water-specific parameters */
+ /* Only atoms 1,2,3 of the water4 carry charge in this Coulomb-only kernel;
+ * atom 0 (the vdW-only site) is skipped throughout.
+ */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ /* The +DIM offset skips atom 0 so registers 1..3 hold the charged sites. */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ /* Full 8-wide batches only; the loop stops as soon as the batch would
+ * contain a padded (negative) jnr, which the masked epilogue handles.
+ */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ /* V = q_i q_j / r, F_scalar = V / r^2 (V is only an intermediate here) */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,rinv30);
+ felec = _mm256_mul_ps(velec,rinvsq30);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 81 flops */
+ }
+
+ /* Epilogue: handle the final, possibly partial batch of up to 8 j atoms.
+ * Dummy (negative) list entries are masked out of the forces.
+ */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Clamp dummy indices to 0 so the coordinate/charge loads stay in bounds. */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,rinv10);
+ felec = _mm256_mul_ps(velec,rinvsq10);
+
+ fscal = felec;
+
+ /* Zero the force contribution of dummy entries. */
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,rinv20);
+ felec = _mm256_mul_ps(velec,rinvsq20);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,rinv30);
+ felec = _mm256_mul_ps(velec,rinvsq30);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /* Real atoms scatter forces into f; dummy entries dump into scratch. */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 81 flops */
+ }
+
+ /* End of innermost loop */
+
+ /* Reduce and store i forces; the +DIM offset again skips atom 0. */
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_F,outeriter*18 + inneriter*81);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwNone_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: None
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecCoul_VdwNone_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,rinv13);
+ felec = _mm256_mul_ps(velec,rinvsq13);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,rinv23);
+ felec = _mm256_mul_ps(velec,rinvsq23);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,rinv31);
+ felec = _mm256_mul_ps(velec,rinvsq31);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,rinv32);
+ felec = _mm256_mul_ps(velec,rinvsq32);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,rinv33);
+ felec = _mm256_mul_ps(velec,rinvsq33);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 243 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,rinv13);
+ felec = _mm256_mul_ps(velec,rinvsq13);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,rinv23);
+ felec = _mm256_mul_ps(velec,rinvsq23);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,rinv31);
+ felec = _mm256_mul_ps(velec,rinvsq31);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,rinv32);
+ felec = _mm256_mul_ps(velec,rinvsq32);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,rinv33);
+ felec = _mm256_mul_ps(velec,rinvsq33);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 243 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*243);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwNone_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: Coulomb
+ * VdW interaction: None
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecCoul_VdwNone_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,rinv13);
+ felec = _mm256_mul_ps(velec,rinvsq13);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,rinv23);
+ felec = _mm256_mul_ps(velec,rinvsq23);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,rinv31);
+ felec = _mm256_mul_ps(velec,rinvsq31);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,rinv32);
+ felec = _mm256_mul_ps(velec,rinvsq32);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,rinv33);
+ felec = _mm256_mul_ps(velec,rinvsq33);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 234 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+             */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,rinv11);
+ felec = _mm256_mul_ps(velec,rinvsq11);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,rinv12);
+ felec = _mm256_mul_ps(velec,rinvsq12);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,rinv13);
+ felec = _mm256_mul_ps(velec,rinvsq13);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,rinv21);
+ felec = _mm256_mul_ps(velec,rinvsq21);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,rinv22);
+ felec = _mm256_mul_ps(velec,rinvsq22);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,rinv23);
+ felec = _mm256_mul_ps(velec,rinvsq23);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,rinv31);
+ felec = _mm256_mul_ps(velec,rinvsq31);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,rinv32);
+ felec = _mm256_mul_ps(velec,rinvsq32);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,rinv33);
+ felec = _mm256_mul_ps(velec,rinvsq33);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 234 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*234);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJSh_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEwSh_VdwLJSh_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+     * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+     */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 127 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+             */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 128 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 9 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*128);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJSh_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSh_VdwLJSh_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 66 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 67 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*67);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 348 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 351 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*351);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 187 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 190 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*190);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv01,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq01,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv02,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq02,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 999 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv01,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq01,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv02,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq02,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 1008 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*1008);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 538 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 547 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*547);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJSh_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEwSh_VdwLJSh_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv30,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq30,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 371 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv30,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq30,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 374 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*374);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJSh_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSh_VdwLJSh_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 210 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 213 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*213);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv13,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq13,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv23,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq23,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv31,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq31,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv32,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq32,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv33,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq33,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 1025 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv13,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq13,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv23,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq23,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv31,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq31,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv32,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq32,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv33,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq33,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 1034 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*1034);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 564 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 573 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*573);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwNone_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+/*
+ * Potential-and-force kernel: shifted-potential Ewald electrostatics,
+ * no Van der Waals, particle-particle geometry, 8-way AVX-256 single
+ * precision SIMD over j particles.
+ *
+ * Reads:   nlist (neighbor list), xx (coordinates), mdatoms->chargeA,
+ *          fr (Ewald coefficient, potential shift, cutoff, shift vectors).
+ * Writes:  ff (forces, accumulated), fr->fshift (shift forces),
+ *          kernel_data->energygrp_elec[ggid] (electrostatic energy),
+ *          nrnb (flop accounting via inc_nrnb).
+ */
+void
+nb_kernel_ElecEwSh_VdwNone_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* scratch: dump site for force updates belonging to dummy (masked-out)
+ * j entries in the masked epilogue loop, so real atoms in f are untouched. */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ /* Ewald splitting parameter and its powers, used by the analytical
+ * PME correction below. */
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ /* Full 8-wide iterations only: a negative (padding) entry in jjnr[jidx+7]
+ * terminates this loop; the remainder is handled by the masked epilogue
+ * block further below. */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ /* Scalar force: felec = qq*(pmecorrF(beta^2*r^2)*beta^3 + 1/r^3);
+ * the extra 1/r is folded in because fscal is later multiplied by
+ * the displacement vector rather than a unit vector. */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ /* Shifted potential: velec = qq*(1/r - sh_ewald - beta*pmecorrV) */
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 109 flops */
+ }
+
+ /* Masked epilogue: handles the final partial batch of j particles,
+ * where negative jjnr entries mark dummy (padding) lanes. */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Dummy lanes are redirected to atom 0 so loads stay in bounds;
+ * their contributions are cleared with dummy_mask afterwards. */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* NOTE(review): r00 is not consumed by the analytical PME path
+ * below; these two statements look like dead code left by the
+ * kernel generator (r is only needed by tabulated variants). */
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 110 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 8 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VF,outeriter*8 + inneriter*110);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwNone_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSh_VdwNone_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 59 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 60 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_F,outeriter*7 + inneriter*60);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwNone_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEwSh_VdwNone_GeomW3P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ /* Ewald parameters: beta is the splitting parameter; beta^2 and beta^3 are
+ * pre-broadcast for the analytical PME correction evaluated in the inner loop.
+ */
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ /* Table pointers are initialized but not read in this kernel: the analytical
+ * PME correction (gmx_mm256_pmecorrF/V_ps) is used instead of table lookups.
+ */
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ /* scratch acts as a dump area: force updates for padded (dummy) j entries in
+ * the masked epilogue below are written here instead of into the real f array.
+ */
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop: runs as long as a full octet of real j atoms
+ * remains (padding entries in jjnr are negative, see epilogue below). */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ /* sh_ewald is subtracted from 1/r: potential shift, presumably so that
+ * V is zero at the cutoff (ElecEwSh) -- confirm against the generator. */
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 330 flops */
+ }
+
+ /* Epilogue: handle the final 1-7 real j atoms; the list is padded to a
+ * multiple of 8 with negative indices, which are masked out via dummy_mask. */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Dummy entries are redirected to index 0 so the coordinate loads stay in bounds. */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /* Forces for dummy entries are redirected into the scratch dump area. */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 333 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_VF,outeriter*19 + inneriter*333);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwNone_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSh_VdwNone_GeomW3P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 180 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 183 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_F,outeriter*18 + inneriter*183);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwNone_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEwSh_VdwNone_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv01,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq01,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv02,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq02,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 981 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv01,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq01,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv02,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq02,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 990 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_VF,outeriter*19 + inneriter*990);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwNone_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSh_VdwNone_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 531 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 540 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_F,outeriter*18 + inneriter*540);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwNone_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEwSh_VdwNone_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv30,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq30,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 330 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv30,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq30,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 333 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_VF,outeriter*19 + inneriter*333);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwNone_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSh_VdwNone_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 180 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 183 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_F,outeriter*18 + inneriter*183);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwNone_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEwSh_VdwNone_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv13,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq13,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv23,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq23,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv31,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq31,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv32,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq32,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv33,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq33,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 981 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv13,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq13,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv23,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq23,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv31,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq31,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv32,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq32,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv33,sh_ewald),pmecorrV);
+ velec = _mm256_mul_ps(qq33,velec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 990 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*990);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwNone_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSh_VdwNone_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 531 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 540 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*540);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwLJSw_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ *
+ * NOTE(review): this file is produced by the avx_256_single kernel generator;
+ * fix defects in the generator templates rather than hand-editing this file.
+ */
+void
+nb_kernel_ElecEwSw_VdwLJSw_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ /* NOTE(review): several declarations below (e.g. crf, krf, krf2, ewitab,
+ * signbit, two) are never referenced in this kernel flavor — the generator
+ * appears to emit one shared declaration set for all variants.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* scratch absorbs j-forces written for padded (dummy) neighbor entries,
+ * so the real force array is never touched for them. */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters: polynomial coefficients for the potential-switch
+ * function and its derivative over the interval [rswitch,rcutoff]. */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop.
+ * The j list is padded with negative indices, so jjnr[jidx+7]>=0
+ * guarantees a full octet of real atoms; the remainder (if any) is
+ * handled by the masked epilogue below. */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 126 flops */
+ }
+
+ /* Epilogue: final, possibly padded octet of the j list; dummy entries
+ * (negative indices) are neutralized via dummy_mask and scratch. */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ /* Zero r for dummy lanes so the switch polynomial below sees
+ * finite values for padded entries. */
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ /* Dummy entries write their forces to scratch instead of f */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 127 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 9 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*127);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwLJSw_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSw_VdwLJSw_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 120 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 121 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*121);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 345 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 348 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*348);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* scratch is a dump target for force contributions of padding (dummy) j entries */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ /* NOTE(review): this main loop only runs while a full octet of real j atoms
+ * remains (jjnr[jidx+7]>=0); negative (padding) entries are handled by the
+ * masked epilogue further down, so no dummy_mask is needed here.
+ */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* r = rsq * 1/sqrt(rsq): avoids a separate sqrt */
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* d = max(r - rswitch, 0): switch polynomial argument, zero inside the switch region */
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 333 flops */
+ }
+
+ /* Masked epilogue: handle the final partial octet of j atoms (fewer than 8
+ * real entries). dummy_mask zeroes contributions from padding entries, and
+ * their forces are redirected into the local scratch buffer below.
+ */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ /* Clear r for dummy entries so the switch polynomial stays finite */
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /* Dummy entries scatter their forces into scratch instead of the real force array */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 336 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*336);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv01,pmecorrV);
+ velec = _mm256_mul_ps(qq01,velec);
+
+ d = _mm256_sub_ps(r01,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv01,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv02,pmecorrV);
+ velec = _mm256_mul_ps(qq02,velec);
+
+ d = _mm256_sub_ps(r02,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv02,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ d = _mm256_sub_ps(r11,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ d = _mm256_sub_ps(r12,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ d = _mm256_sub_ps(r21,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ d = _mm256_sub_ps(r22,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 990 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv01,pmecorrV);
+ velec = _mm256_mul_ps(qq01,velec);
+
+ d = _mm256_sub_ps(r01,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv01,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv02,pmecorrV);
+ velec = _mm256_mul_ps(qq02,velec);
+
+ d = _mm256_sub_ps(r02,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv02,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ d = _mm256_sub_ps(r11,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ d = _mm256_sub_ps(r12,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ d = _mm256_sub_ps(r21,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ d = _mm256_sub_ps(r22,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 999 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*999);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv01,pmecorrV);
+ velec = _mm256_mul_ps(qq01,velec);
+
+ d = _mm256_sub_ps(r01,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv01,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv02,pmecorrV);
+ velec = _mm256_mul_ps(qq02,velec);
+
+ d = _mm256_sub_ps(r02,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv02,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ d = _mm256_sub_ps(r11,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ d = _mm256_sub_ps(r12,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ d = _mm256_sub_ps(r21,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ d = _mm256_sub_ps(r22,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 960 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv01,pmecorrV);
+ velec = _mm256_mul_ps(qq01,velec);
+
+ d = _mm256_sub_ps(r01,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv01,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv02,pmecorrV);
+ velec = _mm256_mul_ps(qq02,velec);
+
+ d = _mm256_sub_ps(r02,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv02,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ d = _mm256_sub_ps(r11,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ d = _mm256_sub_ps(r12,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ d = _mm256_sub_ps(r21,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ d = _mm256_sub_ps(r22,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 969 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*969);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv30,pmecorrV);
+ velec = _mm256_mul_ps(qq30,velec);
+
+ d = _mm256_sub_ps(r30,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 386 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv30,pmecorrV);
+ velec = _mm256_mul_ps(qq30,velec);
+
+ d = _mm256_sub_ps(r30,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 390 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*390);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv30,pmecorrV);
+ velec = _mm256_mul_ps(qq30,velec);
+
+ d = _mm256_sub_ps(r30,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 374 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv30,pmecorrV);
+ velec = _mm256_mul_ps(qq30,velec);
+
+ d = _mm256_sub_ps(r30,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 378 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*378);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ d = _mm256_sub_ps(r11,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ d = _mm256_sub_ps(r12,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv13,pmecorrV);
+ velec = _mm256_mul_ps(qq13,velec);
+
+ d = _mm256_sub_ps(r13,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv13,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ d = _mm256_sub_ps(r21,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ d = _mm256_sub_ps(r22,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv23,pmecorrV);
+ velec = _mm256_mul_ps(qq23,velec);
+
+ d = _mm256_sub_ps(r23,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv23,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv31,pmecorrV);
+ velec = _mm256_mul_ps(qq31,velec);
+
+ d = _mm256_sub_ps(r31,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv31,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv32,pmecorrV);
+ velec = _mm256_mul_ps(qq32,velec);
+
+ d = _mm256_sub_ps(r32,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv32,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv33,pmecorrV);
+ velec = _mm256_mul_ps(qq33,velec);
+
+ d = _mm256_sub_ps(r33,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv33,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 1034 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ d = _mm256_sub_ps(r11,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ d = _mm256_sub_ps(r12,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv13,pmecorrV);
+ velec = _mm256_mul_ps(qq13,velec);
+
+ d = _mm256_sub_ps(r13,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv13,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ d = _mm256_sub_ps(r21,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ d = _mm256_sub_ps(r22,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv23,pmecorrV);
+ velec = _mm256_mul_ps(qq23,velec);
+
+ d = _mm256_sub_ps(r23,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv23,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv31,pmecorrV);
+ velec = _mm256_mul_ps(qq31,velec);
+
+ d = _mm256_sub_ps(r31,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv31,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv32,pmecorrV);
+ velec = _mm256_mul_ps(qq32,velec);
+
+ d = _mm256_sub_ps(r32,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv32,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv33,pmecorrV);
+ velec = _mm256_mul_ps(qq33,velec);
+
+ d = _mm256_sub_ps(r33,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv33,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 1044 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*1044);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ d = _mm256_sub_ps(r11,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ d = _mm256_sub_ps(r12,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv13,pmecorrV);
+ velec = _mm256_mul_ps(qq13,velec);
+
+ d = _mm256_sub_ps(r13,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv13,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ d = _mm256_sub_ps(r21,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ d = _mm256_sub_ps(r22,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv23,pmecorrV);
+ velec = _mm256_mul_ps(qq23,velec);
+
+ d = _mm256_sub_ps(r23,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv23,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv31,pmecorrV);
+ velec = _mm256_mul_ps(qq31,velec);
+
+ d = _mm256_sub_ps(r31,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv31,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv32,pmecorrV);
+ velec = _mm256_mul_ps(qq32,velec);
+
+ d = _mm256_sub_ps(r32,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv32,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv33,pmecorrV);
+ velec = _mm256_mul_ps(qq33,velec);
+
+ d = _mm256_sub_ps(r33,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv33,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 1004 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ d = _mm256_sub_ps(r11,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ d = _mm256_sub_ps(r12,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv13,pmecorrV);
+ velec = _mm256_mul_ps(qq13,velec);
+
+ d = _mm256_sub_ps(r13,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv13,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ d = _mm256_sub_ps(r21,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ d = _mm256_sub_ps(r22,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv23,pmecorrV);
+ velec = _mm256_mul_ps(qq23,velec);
+
+ d = _mm256_sub_ps(r23,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv23,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv31,pmecorrV);
+ velec = _mm256_mul_ps(qq31,velec);
+
+ d = _mm256_sub_ps(r31,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv31,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv32,pmecorrV);
+ velec = _mm256_mul_ps(qq32,velec);
+
+ d = _mm256_sub_ps(r32,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv32,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv33,pmecorrV);
+ velec = _mm256_mul_ps(qq33,velec);
+
+ d = _mm256_sub_ps(r33,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv33,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 1014 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*1014);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwNone_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEwSw_VdwNone_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ /* NOTE(review): several of the locals above (signbit, two, krf/crf, ewitab*,
+ * sh_ewald and the ewtab* table values) appear unused in this kernel body,
+ * since the analytical PME correction path is used instead of the table —
+ * a known artifact of the kernel generator; dead but harmless.
+ */
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ /* Main loop handles full groups of 8 j-atoms: jjnr[jidx+7]>=0 guarantees
+ * all eight entries are real atoms, so no dummy masking is needed here;
+ * the remainder (<8 entries) is handled by the masked epilogue below.
+ */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 108 flops */
+ }
+
+ /* Epilogue: fewer than 8 j-atoms remain in the list; padded entries carry
+ * negative jnr values and are neutralized below via dummy_mask.
+ */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ /* Clear r in the dummy lanes so the switch-function polynomial
+ * below sees 0 instead of whatever the padded coordinates produce.
+ */
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ /* Padded (dummy) entries dump their forces into the local scratch
+ * buffer instead of decrementing a real atom's force.
+ */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 109 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 8 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VF,outeriter*8 + inneriter*109);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwNone_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSw_VdwNone_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* scratch receives force contributions from padding (dummy) j entries so they are discarded */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters: polynomial coefficients for the switching region [rswitch,rcutoff] */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+
+ /* Start inner kernel loop: 8 j particles per iteration. The loop exits at the
+ * first padding entry (jjnr[jidx+7]<0); the remainder is handled below with masking.
+ */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* Switch polynomial argument: d = max(r - rswitch, 0) */
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ /* Zero the force for lanes beyond the cutoff */
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 105 flops */
+ }
+
+ /* Epilogue: remaining 1-8 j particles, with padding (dummy) entries masked out */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Dummy entries read atom 0 data; their forces are later routed to scratch */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ /* Clear r in dummy lanes so the switch polynomial stays well-behaved there */
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* Switch polynomial argument: d = max(r - rswitch, 0) */
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ /* Zero the force for lanes beyond the cutoff */
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Zero the force in dummy (padding) lanes */
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ /* Route dummy-lane force writes to the scratch buffer */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 106 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_F,outeriter*7 + inneriter*106);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwNone_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEwSw_VdwNone_GeomW3P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 327 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 330 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_VF,outeriter*19 + inneriter*330);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwNone_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSw_VdwNone_GeomW3P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ /* Main loop: advance 8 j-atoms per iteration while all eight jjnr entries are real (non-negative) */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 318 flops */
+ }
+
+ /* Epilogue: up to 7 remaining j entries; negative jjnr values are padding ("dummy") atoms */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ /* Clear r in dummy lanes so the switch-function polynomial sees well-defined input */
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /* Route j forces for dummy lanes to the local scratch buffer (discarded) */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 321 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_F,outeriter*18 + inneriter*321);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwNone_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEwSw_VdwNone_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv01,pmecorrV);
+ velec = _mm256_mul_ps(qq01,velec);
+
+ d = _mm256_sub_ps(r01,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv01,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv02,pmecorrV);
+ velec = _mm256_mul_ps(qq02,velec);
+
+ d = _mm256_sub_ps(r02,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv02,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ d = _mm256_sub_ps(r11,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ d = _mm256_sub_ps(r12,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ d = _mm256_sub_ps(r21,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ d = _mm256_sub_ps(r22,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 972 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv01,pmecorrV);
+ velec = _mm256_mul_ps(qq01,velec);
+
+ d = _mm256_sub_ps(r01,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv01,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv02,pmecorrV);
+ velec = _mm256_mul_ps(qq02,velec);
+
+ d = _mm256_sub_ps(r02,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv02,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ d = _mm256_sub_ps(r11,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ d = _mm256_sub_ps(r12,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ d = _mm256_sub_ps(r21,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ d = _mm256_sub_ps(r22,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 981 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_VF,outeriter*19 + inneriter*981);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwNone_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSw_VdwNone_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv01,pmecorrV);
+ velec = _mm256_mul_ps(qq01,velec);
+
+ d = _mm256_sub_ps(r01,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv01,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv02,pmecorrV);
+ velec = _mm256_mul_ps(qq02,velec);
+
+ d = _mm256_sub_ps(r02,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv02,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ d = _mm256_sub_ps(r11,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ d = _mm256_sub_ps(r12,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ d = _mm256_sub_ps(r21,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ d = _mm256_sub_ps(r22,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 945 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv01,pmecorrV);
+ velec = _mm256_mul_ps(qq01,velec);
+
+ d = _mm256_sub_ps(r01,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv01,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv02,pmecorrV);
+ velec = _mm256_mul_ps(qq02,velec);
+
+ d = _mm256_sub_ps(r02,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv02,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ d = _mm256_sub_ps(r11,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ d = _mm256_sub_ps(r12,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ d = _mm256_sub_ps(r21,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ d = _mm256_sub_ps(r22,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 954 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_F,outeriter*18 + inneriter*954);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwNone_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEwSw_VdwNone_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* scratch: dump target for forces of padding (dummy) j-entries so no real atom's memory is touched */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ /* Flatten rvec (real[DIM]) arrays into plain real pointers for the SIMD load/store helpers */
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ /* Only water atoms 1..3 carry charges here; atom 0 has no interactions in this kernel */
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ /* NOTE(review): offset +DIM skips water atom 0, loading coordinates for atoms 1..3 only */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ /* Full-width pass: blocks of 8 j particles; stops at the first negative (padding) index */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv30,pmecorrV);
+ velec = _mm256_mul_ps(qq30,velec);
+
+ d = _mm256_sub_ps(r30,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 327 flops */
+ }
+
+ /* Epilogue for the final partial block: padding entries carry negative jnr and are masked out below */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv30,pmecorrV);
+ velec = _mm256_mul_ps(qq30,velec);
+
+ d = _mm256_sub_ps(r30,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /* Dummy entries write their (already-zeroed) forces to scratch instead of real atom memory */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 330 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_VF,outeriter*19 + inneriter*330);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwNone_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSw_VdwNone_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv30,pmecorrV);
+ velec = _mm256_mul_ps(qq30,velec);
+
+ d = _mm256_sub_ps(r30,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 318 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ d = _mm256_sub_ps(r10,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ d = _mm256_sub_ps(r20,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv30,pmecorrV);
+ velec = _mm256_mul_ps(qq30,velec);
+
+ d = _mm256_sub_ps(r30,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 321 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_F,outeriter*18 + inneriter*321);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwNone_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEwSw_VdwNone_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ d = _mm256_sub_ps(r11,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ d = _mm256_sub_ps(r12,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv13,pmecorrV);
+ velec = _mm256_mul_ps(qq13,velec);
+
+ d = _mm256_sub_ps(r13,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv13,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ d = _mm256_sub_ps(r21,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ d = _mm256_sub_ps(r22,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv23,pmecorrV);
+ velec = _mm256_mul_ps(qq23,velec);
+
+ d = _mm256_sub_ps(r23,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv23,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv31,pmecorrV);
+ velec = _mm256_mul_ps(qq31,velec);
+
+ d = _mm256_sub_ps(r31,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv31,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv32,pmecorrV);
+ velec = _mm256_mul_ps(qq32,velec);
+
+ d = _mm256_sub_ps(r32,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv32,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv33,pmecorrV);
+ velec = _mm256_mul_ps(qq33,velec);
+
+ d = _mm256_sub_ps(r33,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv33,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 972 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ d = _mm256_sub_ps(r11,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ d = _mm256_sub_ps(r12,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv13,pmecorrV);
+ velec = _mm256_mul_ps(qq13,velec);
+
+ d = _mm256_sub_ps(r13,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv13,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ d = _mm256_sub_ps(r21,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ d = _mm256_sub_ps(r22,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv23,pmecorrV);
+ velec = _mm256_mul_ps(qq23,velec);
+
+ d = _mm256_sub_ps(r23,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv23,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv31,pmecorrV);
+ velec = _mm256_mul_ps(qq31,velec);
+
+ d = _mm256_sub_ps(r31,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv31,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv32,pmecorrV);
+ velec = _mm256_mul_ps(qq32,velec);
+
+ d = _mm256_sub_ps(r32,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv32,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv33,pmecorrV);
+ velec = _mm256_mul_ps(qq33,velec);
+
+ d = _mm256_sub_ps(r33,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv33,_mm256_mul_ps(velec,dsw)) );
+ velec = _mm256_mul_ps(velec,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 981 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*981);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwNone_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEwSw_VdwNone_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ d = _mm256_sub_ps(r11,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ d = _mm256_sub_ps(r12,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv13,pmecorrV);
+ velec = _mm256_mul_ps(qq13,velec);
+
+ d = _mm256_sub_ps(r13,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv13,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ d = _mm256_sub_ps(r21,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ d = _mm256_sub_ps(r22,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv23,pmecorrV);
+ velec = _mm256_mul_ps(qq23,velec);
+
+ d = _mm256_sub_ps(r23,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv23,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv31,pmecorrV);
+ velec = _mm256_mul_ps(qq31,velec);
+
+ d = _mm256_sub_ps(r31,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv31,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv32,pmecorrV);
+ velec = _mm256_mul_ps(qq32,velec);
+
+ d = _mm256_sub_ps(r32,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv32,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv33,pmecorrV);
+ velec = _mm256_mul_ps(qq33,velec);
+
+ d = _mm256_sub_ps(r33,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv33,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 945 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ d = _mm256_sub_ps(r11,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ d = _mm256_sub_ps(r12,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv13,pmecorrV);
+ velec = _mm256_mul_ps(qq13,velec);
+
+ d = _mm256_sub_ps(r13,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv13,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ d = _mm256_sub_ps(r21,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ d = _mm256_sub_ps(r22,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv23,pmecorrV);
+ velec = _mm256_mul_ps(qq23,velec);
+
+ d = _mm256_sub_ps(r23,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv23,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv31,pmecorrV);
+ velec = _mm256_mul_ps(qq31,velec);
+
+ d = _mm256_sub_ps(r31,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv31,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv32,pmecorrV);
+ velec = _mm256_mul_ps(qq32,velec);
+
+ d = _mm256_sub_ps(r32,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv32,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv33,pmecorrV);
+ velec = _mm256_mul_ps(qq33,velec);
+
+ d = _mm256_sub_ps(r33,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv33,_mm256_mul_ps(velec,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 954 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*954);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwCSTab_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: CubicSplineTable
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEw_VdwCSTab_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 118 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 119 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 9 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*119);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwCSTab_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: CubicSplineTable
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ *
+ * Force-only flavor: identical structure to the VF kernel above, but the
+ * potential (VV/velec/vvdw) accumulation is omitted, so only the spline
+ * derivative FF is evaluated from the tables.
+ * NOTE: this file is produced by the GROMACS kernel generator — edit the
+ * generator template, not this file, for any real change.
+ */
+void
+nb_kernel_ElecEw_VdwCSTab_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ /* NOTE(review): several declarations above (e.g. VV, ewitab, cutoff_mask) are
+ * emitted unconditionally by the kernel generator and are unused in this
+ * force-only flavor; they are harmless and kept for generator uniformity.
+ */
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ /* scratch is a dump target for force updates of padding (dummy) j entries */
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Start inner kernel loop.
+ * Eight j atoms per iteration; stops at the first chunk containing a
+ * padding entry (negative index), which is handled by the masked
+ * epilogue below.
+ */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ /* <<3 scales index by 8: each table point holds 4 dispersion + 4 repulsion floats */
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ /* FF is the spline derivative dV/dr; xor with signbit negates to get the force */
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 82 flops */
+ }
+
+ /* Masked epilogue: handles the final partial chunk containing padding entries */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ /* Zero r for dummy lanes so the table index stays in range */
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ /* <<3 scales index by 8: each table point holds 4 dispersion + 4 repulsion floats */
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ /* FF is the spline derivative dV/dr; xor with signbit negates to get the force */
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Clear force contributions from dummy (padding) lanes */
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ /* Dummy lanes write to the scratch buffer instead of real force memory */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 83 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*83);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwCSTab_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEw_VdwCSTab_GeomW3P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 289 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 292 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*292);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwCSTab_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEw_VdwCSTab_GeomW3P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 197 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 200 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*200);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwCSTab_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEw_VdwCSTab_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv01,pmecorrV);
+ velec = _mm256_mul_ps(qq01,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv02,pmecorrV);
+ velec = _mm256_mul_ps(qq02,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 790 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv01,pmecorrV);
+ velec = _mm256_mul_ps(qq01,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv02,pmecorrV);
+ velec = _mm256_mul_ps(qq02,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 799 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*799);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwCSTab_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEw_VdwCSTab_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 530 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+             */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 539 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*539);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwCSTab_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEw_VdwCSTab_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+     * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+     */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv30,pmecorrV);
+ velec = _mm256_mul_ps(qq30,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 311 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv30,pmecorrV);
+ velec = _mm256_mul_ps(qq30,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 315 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*315);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwCSTab_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEw_VdwCSTab_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 219 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+             */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 223 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*223);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwCSTab_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEw_VdwCSTab_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv13,pmecorrV);
+ velec = _mm256_mul_ps(qq13,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv23,pmecorrV);
+ velec = _mm256_mul_ps(qq23,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv31,pmecorrV);
+ velec = _mm256_mul_ps(qq31,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv32,pmecorrV);
+ velec = _mm256_mul_ps(qq32,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv33,pmecorrV);
+ velec = _mm256_mul_ps(qq33,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 815 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv13,pmecorrV);
+ velec = _mm256_mul_ps(qq13,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv23,pmecorrV);
+ velec = _mm256_mul_ps(qq23,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv31,pmecorrV);
+ velec = _mm256_mul_ps(qq31,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv32,pmecorrV);
+ velec = _mm256_mul_ps(qq32,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv33,pmecorrV);
+ velec = _mm256_mul_ps(qq33,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 825 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*825);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwCSTab_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEw_VdwCSTab_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 555 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 565 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*565);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwLJ_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEw_VdwLJ_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 96 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 97 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 9 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*97);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwLJ_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEw_VdwLJ_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 63 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 64 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*64);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwLJ_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ *
+ * Auto-generated AVX-256 single-precision kernel. The outer (i) particle is a
+ * three-site water (sites 0-2); inner (j) particles are single sites processed
+ * eight at a time. The Ewald real-space term uses the analytical PME
+ * correction (no table lookup), and Lennard-Jones acts only between i site 0
+ * and the j particle. Both potential energies and forces are accumulated.
+ */
+void
+nb_kernel_ElecEw_VdwLJ_GeomW3P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* Dump target for forces belonging to padding (dummy) j entries in the
+ * masked epilogue, so those stores do not touch real atom data. */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ /* Table pointers are initialized here but never read below: this kernel
+ * evaluates the Ewald correction analytically (gmx_mm256_pmecorrF/V_ps). */
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ /* Full 8-wide iterations only: a negative jjnr entry marks padding and
+ * terminates this loop; the remainder is handled by the masked epilogue. */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* NOTE: r00 (and r10/r20 below) are computed but not used by the
+ * analytical PME correction path - generator artifact. */
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 267 flops */
+ }
+
+ /* Masked epilogue: handles the final, partially-filled group of up to 8
+ * j atoms. Padding entries (negative jjnr) are neutralized via dummy_mask
+ * and their forces are routed to the scratch buffer. */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /* Route dummy-lane force stores to scratch so real atoms are untouched */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 270 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*270);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwLJ_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEw_VdwLJ_GeomW3P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 178 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 181 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*181);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwLJ_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEw_VdwLJ_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv01,pmecorrV);
+ velec = _mm256_mul_ps(qq01,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv02,pmecorrV);
+ velec = _mm256_mul_ps(qq02,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 768 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv01,pmecorrV);
+ velec = _mm256_mul_ps(qq01,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv02,pmecorrV);
+ velec = _mm256_mul_ps(qq02,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 777 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*777);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwLJ_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEw_VdwLJ_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 511 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 520 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*520);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwLJ_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEw_VdwLJ_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv30,pmecorrV);
+ velec = _mm256_mul_ps(qq30,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 287 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv30,pmecorrV);
+ velec = _mm256_mul_ps(qq30,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 290 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*290);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwLJ_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEw_VdwLJ_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ /* NOTE(review): the kernel generator declares a uniform variable set across all
+ * kernel variants; several temporaries below (e.g. ewitab, ewtab* spacing vars,
+ * krf/crf, signbit) appear unused in this analytical-Ewald, force-only variant. */
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ /* Full-width pass: executes only while all eight jjnr entries are real (>=0);
+ * the padded tail batch is handled by the masked block further below. */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+ /* Force-only LJ evaluation; this F kernel accumulates no potential terms. */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ /* NOTE(review): r10 (and r20/r30 below) are computed but never read on the
+ * analytical PME path - presumably emitted uniformly by the kernel generator
+ * for table-based variants; confirm against the generator before pruning. */
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 198 flops */
+ }
+
+ /* Masked epilogue: processes the final 1-7 j atoms of the list; negative jjnr
+ * entries are padding and their contributions are zeroed via dummy_mask. */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 201 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*201);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwLJ_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEw_VdwLJ_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv13,pmecorrV);
+ velec = _mm256_mul_ps(qq13,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv23,pmecorrV);
+ velec = _mm256_mul_ps(qq23,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv31,pmecorrV);
+ velec = _mm256_mul_ps(qq31,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv32,pmecorrV);
+ velec = _mm256_mul_ps(qq32,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv33,pmecorrV);
+ velec = _mm256_mul_ps(qq33,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 791 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv13,pmecorrV);
+ velec = _mm256_mul_ps(qq13,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv23,pmecorrV);
+ velec = _mm256_mul_ps(qq23,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv31,pmecorrV);
+ velec = _mm256_mul_ps(qq31,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv32,pmecorrV);
+ velec = _mm256_mul_ps(qq32,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv33,pmecorrV);
+ velec = _mm256_mul_ps(qq33,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 800 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*800);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwLJ_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEw_VdwLJ_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 534 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 543 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*543);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwNone_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEw_VdwNone_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* NOTE(review): this file is produced by the GROMACS avx_256_single kernel
+ * generator (see file header). Any defect found here should be fixed in the
+ * generator templates, not by hand-editing this generated code.
+ */
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 84 flops */
+ }
+
+ /* Epilogue for the final partial batch of up to 8 j atoms: entries with
+ * negative jnr are dummies; dummy_mask zeroes their energy and force
+ * contributions, and their force writes are redirected to the scratch
+ * buffer below.
+ */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 85 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 8 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VF,outeriter*8 + inneriter*85);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwNone_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEw_VdwNone_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* NOTE(review): this file is produced by the GROMACS avx_256_single kernel
+ * generator (see file header). Any defect found here should be fixed in the
+ * generator templates, not by hand-editing this generated code. This is the
+ * force-only variant: unlike the _VF kernel above it in the file, it skips
+ * the pmecorrV/velec potential accumulation entirely.
+ */
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 56 flops */
+ }
+
+ /* Epilogue for the final partial batch of up to 8 j atoms: entries with
+ * negative jnr are dummies; dummy_mask zeroes their force contributions,
+ * and their force writes are redirected to the scratch buffer below.
+ */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 57 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_F,outeriter*7 + inneriter*57);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwNone_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEw_VdwNone_GeomW3P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 255 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 258 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_VF,outeriter*19 + inneriter*258);
+}
+/*
+ * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwNone_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction:            None
+ * Geometry:                   Water3-Particle
+ * Calculate force/pot:        Force
+ *
+ * Force-only kernel for a rigid three-site water (outer/i side) interacting
+ * with single charged particles (inner/j side).  Electrostatics use the
+ * analytical Ewald/PME real-space correction; there is no Lennard-Jones term.
+ * The j loop is unrolled eight-way with 256-bit AVX in single precision.
+ *
+ * NOTE(review): this file is produced by the avx_256_single kernel generator;
+ * permanent fixes normally belong in the generator, not in this output.
+ */
+void
+nb_kernel_ElecEw_VdwNone_GeomW3P1_F_avx_256_single
+                    (t_nblist                    * gmx_restrict       nlist,
+                     rvec                        * gmx_restrict          xx,
+                     rvec                        * gmx_restrict          ff,
+                     t_forcerec                  * gmx_restrict          fr,
+                     t_mdatoms                   * gmx_restrict     mdatoms,
+                     nb_kernel_data_t            * gmx_restrict kernel_data,
+                     t_nrnb                      * gmx_restrict        nrnb)
+{
+    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+     * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+     */
+    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
+    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+    int              jnrA,jnrB,jnrC,jnrD;
+    int              jnrE,jnrF,jnrG,jnrH;
+    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
+    real             rcutoff_scalar;
+    real             *shiftvec,*fshift,*x,*f;
+    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+    /* Dump area for forces belonging to padded (dummy) j entries in the
+     * epilogue below; its contents are never read back. */
+    real             scratch[4*DIM];
+    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+    real *           vdwioffsetptr0;
+    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+    real *           vdwioffsetptr1;
+    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+    real *           vdwioffsetptr2;
+    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+    __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+    __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
+    real             *charge;
+    __m256i          ewitab;
+    __m128i          ewitab_lo,ewitab_hi;
+    __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+    __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+    real             *ewtab;
+    __m256           dummy_mask,cutoff_mask;
+    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+    __m256           one     = _mm256_set1_ps(1.0);
+    __m256           two     = _mm256_set1_ps(2.0);
+    x                = xx[0];
+    f                = ff[0];
+
+    nri              = nlist->nri;
+    iinr             = nlist->iinr;
+    jindex           = nlist->jindex;
+    jjnr             = nlist->jjnr;
+    shiftidx         = nlist->shift;
+    gid              = nlist->gid;
+    shiftvec         = fr->shift_vec[0];
+    fshift           = fr->fshift[0];
+    facel            = _mm256_set1_ps(fr->epsfac);
+    charge           = mdatoms->chargeA;
+
+    sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
+    beta             = _mm256_set1_ps(fr->ic->ewaldcoeff);
+    beta2            = _mm256_mul_ps(beta,beta);
+    beta3            = _mm256_mul_ps(beta,beta2);
+
+    /* Tabulated-Ewald data; not referenced in the analytical-correction path
+     * below (kept by the generator for uniformity across kernel flavors). */
+    ewtab            = fr->ic->tabq_coul_F;
+    ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
+    ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+    /* Setup water-specific parameters: the three i charges are constant over
+     * the whole list, pre-scaled by the electrostatics prefactor. */
+    inr              = nlist->iinr[0];
+    iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+    /* Avoid stupid compiler warnings */
+    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+    j_coord_offsetA = 0;
+    j_coord_offsetB = 0;
+    j_coord_offsetC = 0;
+    j_coord_offsetD = 0;
+    j_coord_offsetE = 0;
+    j_coord_offsetF = 0;
+    j_coord_offsetG = 0;
+    j_coord_offsetH = 0;
+
+    outeriter        = 0;
+    inneriter        = 0;
+
+    for(iidx=0;iidx<4*DIM;iidx++)
+    {
+        scratch[iidx] = 0.0;
+    }
+
+    /* Start outer loop over neighborlists */
+    for(iidx=0; iidx<nri; iidx++)
+    {
+        /* Load shift vector for this list */
+        i_shift_offset   = DIM*shiftidx[iidx];
+
+        /* Load limits for loop over neighbors */
+        j_index_start    = jindex[iidx];
+        j_index_end      = jindex[iidx+1];
+
+        /* Get outer coordinate index */
+        inr              = iinr[iidx];
+        i_coord_offset   = DIM*inr;
+
+        /* Load i particle coords and add shift vector */
+        gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+        fix0             = _mm256_setzero_ps();
+        fiy0             = _mm256_setzero_ps();
+        fiz0             = _mm256_setzero_ps();
+        fix1             = _mm256_setzero_ps();
+        fiy1             = _mm256_setzero_ps();
+        fiz1             = _mm256_setzero_ps();
+        fix2             = _mm256_setzero_ps();
+        fiy2             = _mm256_setzero_ps();
+        fiz2             = _mm256_setzero_ps();
+
+        /* Start inner kernel loop.  Full octets only: the j list is padded
+         * with negative indices, so jjnr[jidx+7]<0 flags the (possibly
+         * partial) final octet, which is handled in the masked block below. */
+        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+        {
+
+            /* Get j neighbor index, and coordinate index */
+            jnrA             = jjnr[jidx];
+            jnrB             = jjnr[jidx+1];
+            jnrC             = jjnr[jidx+2];
+            jnrD             = jjnr[jidx+3];
+            jnrE             = jjnr[jidx+4];
+            jnrF             = jjnr[jidx+5];
+            jnrG             = jjnr[jidx+6];
+            jnrH             = jjnr[jidx+7];
+            j_coord_offsetA  = DIM*jnrA;
+            j_coord_offsetB  = DIM*jnrB;
+            j_coord_offsetC  = DIM*jnrC;
+            j_coord_offsetD  = DIM*jnrD;
+            j_coord_offsetE  = DIM*jnrE;
+            j_coord_offsetF  = DIM*jnrF;
+            j_coord_offsetG  = DIM*jnrG;
+            j_coord_offsetH  = DIM*jnrH;
+
+            /* load j atom coordinates */
+            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+                                                 x+j_coord_offsetC,x+j_coord_offsetD,
+                                                 x+j_coord_offsetE,x+j_coord_offsetF,
+                                                 x+j_coord_offsetG,x+j_coord_offsetH,
+                                                 &jx0,&jy0,&jz0);
+
+            /* Calculate displacement vector */
+            dx00             = _mm256_sub_ps(ix0,jx0);
+            dy00             = _mm256_sub_ps(iy0,jy0);
+            dz00             = _mm256_sub_ps(iz0,jz0);
+            dx10             = _mm256_sub_ps(ix1,jx0);
+            dy10             = _mm256_sub_ps(iy1,jy0);
+            dz10             = _mm256_sub_ps(iz1,jz0);
+            dx20             = _mm256_sub_ps(ix2,jx0);
+            dy20             = _mm256_sub_ps(iy2,jy0);
+            dz20             = _mm256_sub_ps(iz2,jz0);
+
+            /* Calculate squared distance and things based on it */
+            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
+            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
+            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
+
+            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
+            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
+            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
+
+            /* Load parameters for j particles */
+            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+                                                               charge+jnrC+0,charge+jnrD+0,
+                                                               charge+jnrE+0,charge+jnrF+0,
+                                                               charge+jnrG+0,charge+jnrH+0);
+
+            fjx0             = _mm256_setzero_ps();
+            fjy0             = _mm256_setzero_ps();
+            fjz0             = _mm256_setzero_ps();
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            /* r00 appears unused in this force-only analytical path; the
+             * generator emits it for uniformity with tabulated variants. */
+            r00              = _mm256_mul_ps(rsq00,rinv00);
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq00             = _mm256_mul_ps(iq0,jq0);
+
+            /* EWALD ELECTROSTATICS */
+
+            /* Analytical PME correction: scalar force is
+             * qq * (rinv^3 + beta^3 * pmecorrF(beta^2 rsq)). */
+            zeta2            = _mm256_mul_ps(beta2,rsq00);
+            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
+            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
+            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+            felec            = _mm256_mul_ps(qq00,felec);
+
+            fscal            = felec;
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx00);
+            ty               = _mm256_mul_ps(fscal,dy00);
+            tz               = _mm256_mul_ps(fscal,dz00);
+
+            /* Update vectorial force */
+            fix0             = _mm256_add_ps(fix0,tx);
+            fiy0             = _mm256_add_ps(fiy0,ty);
+            fiz0             = _mm256_add_ps(fiz0,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            r10              = _mm256_mul_ps(rsq10,rinv10);
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq10             = _mm256_mul_ps(iq1,jq0);
+
+            /* EWALD ELECTROSTATICS */
+
+            /* Analytical PME correction */
+            zeta2            = _mm256_mul_ps(beta2,rsq10);
+            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
+            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
+            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+            felec            = _mm256_mul_ps(qq10,felec);
+
+            fscal            = felec;
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx10);
+            ty               = _mm256_mul_ps(fscal,dy10);
+            tz               = _mm256_mul_ps(fscal,dz10);
+
+            /* Update vectorial force */
+            fix1             = _mm256_add_ps(fix1,tx);
+            fiy1             = _mm256_add_ps(fiy1,ty);
+            fiz1             = _mm256_add_ps(fiz1,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            r20              = _mm256_mul_ps(rsq20,rinv20);
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq20             = _mm256_mul_ps(iq2,jq0);
+
+            /* EWALD ELECTROSTATICS */
+
+            /* Analytical PME correction */
+            zeta2            = _mm256_mul_ps(beta2,rsq20);
+            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
+            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
+            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+            felec            = _mm256_mul_ps(qq20,felec);
+
+            fscal            = felec;
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx20);
+            ty               = _mm256_mul_ps(fscal,dy20);
+            tz               = _mm256_mul_ps(fscal,dz20);
+
+            /* Update vectorial force */
+            fix2             = _mm256_add_ps(fix2,tx);
+            fiy2             = _mm256_add_ps(fiy2,ty);
+            fiz2             = _mm256_add_ps(fiz2,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            fjptrA             = f+j_coord_offsetA;
+            fjptrB             = f+j_coord_offsetB;
+            fjptrC             = f+j_coord_offsetC;
+            fjptrD             = f+j_coord_offsetD;
+            fjptrE             = f+j_coord_offsetE;
+            fjptrF             = f+j_coord_offsetF;
+            fjptrG             = f+j_coord_offsetG;
+            fjptrH             = f+j_coord_offsetH;
+
+            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+            /* Inner loop uses 171 flops */
+        }
+
+        /* Masked epilogue for the final, padded octet (if any). */
+        if(jidx<j_index_end)
+        {
+
+            /* Get j neighbor index, and coordinate index */
+            jnrlistA         = jjnr[jidx];
+            jnrlistB         = jjnr[jidx+1];
+            jnrlistC         = jjnr[jidx+2];
+            jnrlistD         = jjnr[jidx+3];
+            jnrlistE         = jjnr[jidx+4];
+            jnrlistF         = jjnr[jidx+5];
+            jnrlistG         = jjnr[jidx+6];
+            jnrlistH         = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+             */
+            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+            /* Dummy entries are redirected to index 0 so loads stay in-bounds;
+             * their contributions are masked out below. */
+            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
+            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
+            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
+            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
+            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
+            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
+            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
+            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
+            j_coord_offsetA  = DIM*jnrA;
+            j_coord_offsetB  = DIM*jnrB;
+            j_coord_offsetC  = DIM*jnrC;
+            j_coord_offsetD  = DIM*jnrD;
+            j_coord_offsetE  = DIM*jnrE;
+            j_coord_offsetF  = DIM*jnrF;
+            j_coord_offsetG  = DIM*jnrG;
+            j_coord_offsetH  = DIM*jnrH;
+
+            /* load j atom coordinates */
+            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+                                                 x+j_coord_offsetC,x+j_coord_offsetD,
+                                                 x+j_coord_offsetE,x+j_coord_offsetF,
+                                                 x+j_coord_offsetG,x+j_coord_offsetH,
+                                                 &jx0,&jy0,&jz0);
+
+            /* Calculate displacement vector */
+            dx00             = _mm256_sub_ps(ix0,jx0);
+            dy00             = _mm256_sub_ps(iy0,jy0);
+            dz00             = _mm256_sub_ps(iz0,jz0);
+            dx10             = _mm256_sub_ps(ix1,jx0);
+            dy10             = _mm256_sub_ps(iy1,jy0);
+            dz10             = _mm256_sub_ps(iz1,jz0);
+            dx20             = _mm256_sub_ps(ix2,jx0);
+            dy20             = _mm256_sub_ps(iy2,jy0);
+            dz20             = _mm256_sub_ps(iz2,jz0);
+
+            /* Calculate squared distance and things based on it */
+            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
+            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
+            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
+
+            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
+            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
+            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
+
+            /* Load parameters for j particles */
+            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+                                                               charge+jnrC+0,charge+jnrD+0,
+                                                               charge+jnrE+0,charge+jnrF+0,
+                                                               charge+jnrG+0,charge+jnrH+0);
+
+            fjx0             = _mm256_setzero_ps();
+            fjy0             = _mm256_setzero_ps();
+            fjz0             = _mm256_setzero_ps();
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            r00              = _mm256_mul_ps(rsq00,rinv00);
+            r00              = _mm256_andnot_ps(dummy_mask,r00);
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq00             = _mm256_mul_ps(iq0,jq0);
+
+            /* EWALD ELECTROSTATICS */
+
+            /* Analytical PME correction */
+            zeta2            = _mm256_mul_ps(beta2,rsq00);
+            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
+            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
+            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+            felec            = _mm256_mul_ps(qq00,felec);
+
+            fscal            = felec;
+
+            /* Zero the force for dummy (padding) lanes. */
+            fscal            = _mm256_andnot_ps(dummy_mask,fscal);
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx00);
+            ty               = _mm256_mul_ps(fscal,dy00);
+            tz               = _mm256_mul_ps(fscal,dz00);
+
+            /* Update vectorial force */
+            fix0             = _mm256_add_ps(fix0,tx);
+            fiy0             = _mm256_add_ps(fiy0,ty);
+            fiz0             = _mm256_add_ps(fiz0,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            r10              = _mm256_mul_ps(rsq10,rinv10);
+            r10              = _mm256_andnot_ps(dummy_mask,r10);
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq10             = _mm256_mul_ps(iq1,jq0);
+
+            /* EWALD ELECTROSTATICS */
+
+            /* Analytical PME correction */
+            zeta2            = _mm256_mul_ps(beta2,rsq10);
+            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
+            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
+            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+            felec            = _mm256_mul_ps(qq10,felec);
+
+            fscal            = felec;
+
+            fscal            = _mm256_andnot_ps(dummy_mask,fscal);
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx10);
+            ty               = _mm256_mul_ps(fscal,dy10);
+            tz               = _mm256_mul_ps(fscal,dz10);
+
+            /* Update vectorial force */
+            fix1             = _mm256_add_ps(fix1,tx);
+            fiy1             = _mm256_add_ps(fiy1,ty);
+            fiz1             = _mm256_add_ps(fiz1,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            r20              = _mm256_mul_ps(rsq20,rinv20);
+            r20              = _mm256_andnot_ps(dummy_mask,r20);
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq20             = _mm256_mul_ps(iq2,jq0);
+
+            /* EWALD ELECTROSTATICS */
+
+            /* Analytical PME correction */
+            zeta2            = _mm256_mul_ps(beta2,rsq20);
+            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
+            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
+            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+            felec            = _mm256_mul_ps(qq20,felec);
+
+            fscal            = felec;
+
+            fscal            = _mm256_andnot_ps(dummy_mask,fscal);
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx20);
+            ty               = _mm256_mul_ps(fscal,dy20);
+            tz               = _mm256_mul_ps(fscal,dz20);
+
+            /* Update vectorial force */
+            fix2             = _mm256_add_ps(fix2,tx);
+            fiy2             = _mm256_add_ps(fiy2,ty);
+            fiz2             = _mm256_add_ps(fiz2,tz);
+
+            fjx0             = _mm256_add_ps(fjx0,tx);
+            fjy0             = _mm256_add_ps(fjy0,ty);
+            fjz0             = _mm256_add_ps(fjz0,tz);
+
+            /* Forces for dummy lanes are written to scratch and discarded. */
+            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+            /* Inner loop uses 174 flops */
+        }
+
+        /* End of innermost loop */
+
+        gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+                                                 f+i_coord_offset,fshift+i_shift_offset);
+
+        /* Increment number of inner iterations */
+        inneriter                  += j_index_end - j_index_start;
+
+        /* Outer loop uses 18 flops */
+    }
+
+    /* Increment number of outer iterations */
+    outeriter        += nri;
+
+    /* Update outer/inner flops */
+
+    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_F,outeriter*18 + inneriter*174);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwNone_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEw_VdwNone_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv01,pmecorrV);
+ velec = _mm256_mul_ps(qq01,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv02,pmecorrV);
+ velec = _mm256_mul_ps(qq02,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 756 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+             */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv00,pmecorrV);
+ velec = _mm256_mul_ps(qq00,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv01,pmecorrV);
+ velec = _mm256_mul_ps(qq01,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv02,pmecorrV);
+ velec = _mm256_mul_ps(qq02,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 765 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_VF,outeriter*19 + inneriter*765);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwNone_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEw_VdwNone_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 504 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq00);
+ rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq00,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r01 = _mm256_mul_ps(rsq01,rinv01);
+ r01 = _mm256_andnot_ps(dummy_mask,r01);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq01);
+ rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq01,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r02 = _mm256_mul_ps(rsq02,rinv02);
+ r02 = _mm256_andnot_ps(dummy_mask,r02);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq02);
+ rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq02,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 513 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_F,outeriter*18 + inneriter*513);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwNone_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEw_VdwNone_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv30,pmecorrV);
+ velec = _mm256_mul_ps(qq30,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 255 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv10,pmecorrV);
+ velec = _mm256_mul_ps(qq10,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv20,pmecorrV);
+ velec = _mm256_mul_ps(qq20,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv30,pmecorrV);
+ velec = _mm256_mul_ps(qq30,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 258 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_VF,outeriter*19 + inneriter*258);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwNone_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEw_VdwNone_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 171 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r10 = _mm256_mul_ps(rsq10,rinv10);
+ r10 = _mm256_andnot_ps(dummy_mask,r10);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq10);
+ rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq10,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r20 = _mm256_mul_ps(rsq20,rinv20);
+ r20 = _mm256_andnot_ps(dummy_mask,r20);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq20);
+ rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq20,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r30 = _mm256_mul_ps(rsq30,rinv30);
+ r30 = _mm256_andnot_ps(dummy_mask,r30);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq30);
+ rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq30,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 174 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_F,outeriter*18 + inneriter*174);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwNone_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecEw_VdwNone_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv13,pmecorrV);
+ velec = _mm256_mul_ps(qq13,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv23,pmecorrV);
+ velec = _mm256_mul_ps(qq23,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv31,pmecorrV);
+ velec = _mm256_mul_ps(qq31,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv32,pmecorrV);
+ velec = _mm256_mul_ps(qq32,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv33,pmecorrV);
+ velec = _mm256_mul_ps(qq33,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 756 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv11,pmecorrV);
+ velec = _mm256_mul_ps(qq11,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv12,pmecorrV);
+ velec = _mm256_mul_ps(qq12,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv13,pmecorrV);
+ velec = _mm256_mul_ps(qq13,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv21,pmecorrV);
+ velec = _mm256_mul_ps(qq21,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv22,pmecorrV);
+ velec = _mm256_mul_ps(qq22,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv23,pmecorrV);
+ velec = _mm256_mul_ps(qq23,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv31,pmecorrV);
+ velec = _mm256_mul_ps(qq31,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv32,pmecorrV);
+ velec = _mm256_mul_ps(qq32,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ velec = _mm256_sub_ps(rinv33,pmecorrV);
+ velec = _mm256_mul_ps(qq33,velec);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 765 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*765);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwNone_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: Ewald
+ * VdW interaction: None
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecEw_VdwNone_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 504 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r11 = _mm256_mul_ps(rsq11,rinv11);
+ r11 = _mm256_andnot_ps(dummy_mask,r11);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq11);
+ rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq11,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r12 = _mm256_mul_ps(rsq12,rinv12);
+ r12 = _mm256_andnot_ps(dummy_mask,r12);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq12);
+ rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq12,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r13 = _mm256_mul_ps(rsq13,rinv13);
+ r13 = _mm256_andnot_ps(dummy_mask,r13);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq13);
+ rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq13,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r21 = _mm256_mul_ps(rsq21,rinv21);
+ r21 = _mm256_andnot_ps(dummy_mask,r21);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq21);
+ rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq21,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r22 = _mm256_mul_ps(rsq22,rinv22);
+ r22 = _mm256_andnot_ps(dummy_mask,r22);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq22);
+ rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq22,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r23 = _mm256_mul_ps(rsq23,rinv23);
+ r23 = _mm256_andnot_ps(dummy_mask,r23);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq23);
+ rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq23,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r31 = _mm256_mul_ps(rsq31,rinv31);
+ r31 = _mm256_andnot_ps(dummy_mask,r31);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq31);
+ rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq31,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r32 = _mm256_mul_ps(rsq32,rinv32);
+ r32 = _mm256_andnot_ps(dummy_mask,r32);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq32);
+ rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq32,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r33 = _mm256_mul_ps(rsq33,rinv33);
+ r33 = _mm256_andnot_ps(dummy_mask,r33);
+
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq33);
+ rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq33,felec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 513 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*513);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecGB_VdwCSTab_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: GeneralizedBorn
+ * VdW interaction: CubicSplineTable
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecGB_VdwCSTab_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i gbitab;
+ __m128i gbitab_lo,gbitab_hi;
+ __m256 vgb,fgb,vgbsum,dvdasum,gbscale,gbtabscale,isaprod,gbqqfactor,gbinvepsdiff,gbeps,dvdatmp;
+ __m256 minushalf = _mm256_set1_ps(-0.5);
+ real *invsqrta,*dvda,*gbtab;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ invsqrta = fr->invsqrta;
+ dvda = fr->dvda;
+ gbtabscale = _mm256_set1_ps(fr->gbtab.scale);
+ gbtab = fr->gbtab.data;
+ gbinvepsdiff = _mm256_set1_ps((1.0/fr->epsilon_r) - (1.0/fr->gb_epsilon_solvent));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ isai0 = _mm256_set1_ps(invsqrta[inr+0]);
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vgbsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+ dvdasum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ isaj0 = gmx_mm256_load_8real_swizzle_ps(invsqrta+jnrA+0,invsqrta+jnrB+0,
+ invsqrta+jnrC+0,invsqrta+jnrD+0,
+ invsqrta+jnrE+0,invsqrta+jnrF+0,
+ invsqrta+jnrG+0,invsqrta+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
+ isaprod = _mm256_mul_ps(isai0,isaj0);
+ gbqqfactor = _mm256_xor_ps(signbit,_mm256_mul_ps(qq00,_mm256_mul_ps(isaprod,gbinvepsdiff)));
+ gbscale = _mm256_mul_ps(isaprod,gbtabscale);
+
+ /* Calculate generalized born table index - this is a separate table from the normal one,
+ * but we use the same procedure by multiplying r with scale and truncating to integer.
+ */
+ rt = _mm256_mul_ps(r00,gbscale);
+ gbitab = _mm256_cvttps_epi32(rt);
+ gbeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ gbitab_lo = _mm256_extractf128_si256(gbitab,0x0);
+ gbitab_hi = _mm256_extractf128_si256(gbitab,0x1);
+ gbitab_lo = _mm_slli_epi32(gbitab_lo,2);
+ gbitab_hi = _mm_slli_epi32(gbitab_hi,2);
+ Y = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,0)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,1)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,2)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,3)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(gbeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(gbeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(gbeps,Fp));
+ vgb = _mm256_mul_ps(gbqqfactor,VV);
+
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(gbeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fgb = _mm256_mul_ps(gbqqfactor,_mm256_mul_ps(FF,gbscale));
+ dvdatmp = _mm256_mul_ps(minushalf,_mm256_add_ps(vgb,_mm256_mul_ps(fgb,r00)));
+ dvdasum = _mm256_add_ps(dvdasum,dvdatmp);
+ fjptrA = dvda+jnrA;
+ fjptrB = dvda+jnrB;
+ fjptrC = dvda+jnrC;
+ fjptrD = dvda+jnrD;
+ fjptrE = dvda+jnrE;
+ fjptrF = dvda+jnrF;
+ fjptrG = dvda+jnrG;
+ fjptrH = dvda+jnrH;
+ gmx_mm256_increment_8real_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ _mm256_mul_ps(dvdatmp,_mm256_mul_ps(isaj0,isaj0)));
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(velec,rinv00),fgb),rinv00);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vgbsum = _mm256_add_ps(vgbsum,vgb);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 91 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+             */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ isaj0 = gmx_mm256_load_8real_swizzle_ps(invsqrta+jnrA+0,invsqrta+jnrB+0,
+ invsqrta+jnrC+0,invsqrta+jnrD+0,
+ invsqrta+jnrE+0,invsqrta+jnrF+0,
+ invsqrta+jnrG+0,invsqrta+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
+ isaprod = _mm256_mul_ps(isai0,isaj0);
+ gbqqfactor = _mm256_xor_ps(signbit,_mm256_mul_ps(qq00,_mm256_mul_ps(isaprod,gbinvepsdiff)));
+ gbscale = _mm256_mul_ps(isaprod,gbtabscale);
+
+ /* Calculate generalized born table index - this is a separate table from the normal one,
+ * but we use the same procedure by multiplying r with scale and truncating to integer.
+ */
+ rt = _mm256_mul_ps(r00,gbscale);
+ gbitab = _mm256_cvttps_epi32(rt);
+ gbeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ gbitab_lo = _mm256_extractf128_si256(gbitab,0x0);
+ gbitab_hi = _mm256_extractf128_si256(gbitab,0x1);
+ gbitab_lo = _mm_slli_epi32(gbitab_lo,2);
+ gbitab_hi = _mm_slli_epi32(gbitab_hi,2);
+ Y = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,0)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,1)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,2)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,3)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(gbeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(gbeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(gbeps,Fp));
+ vgb = _mm256_mul_ps(gbqqfactor,VV);
+
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(gbeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fgb = _mm256_mul_ps(gbqqfactor,_mm256_mul_ps(FF,gbscale));
+ dvdatmp = _mm256_mul_ps(minushalf,_mm256_add_ps(vgb,_mm256_mul_ps(fgb,r00)));
+ dvdasum = _mm256_add_ps(dvdasum,dvdatmp);
+ /* The pointers to scratch make sure that this code with compilers that take gmx_restrict seriously (e.g. icc 13) really can't screw things up. */
+ fjptrA = (jnrlistA>=0) ? dvda+jnrA : scratch;
+ fjptrB = (jnrlistB>=0) ? dvda+jnrB : scratch;
+ fjptrC = (jnrlistC>=0) ? dvda+jnrC : scratch;
+ fjptrD = (jnrlistD>=0) ? dvda+jnrD : scratch;
+ fjptrE = (jnrlistE>=0) ? dvda+jnrE : scratch;
+ fjptrF = (jnrlistF>=0) ? dvda+jnrF : scratch;
+ fjptrG = (jnrlistG>=0) ? dvda+jnrG : scratch;
+ fjptrH = (jnrlistH>=0) ? dvda+jnrH : scratch;
+ gmx_mm256_increment_8real_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ _mm256_mul_ps(dvdatmp,_mm256_mul_ps(isaj0,isaj0)));
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(velec,rinv00),fgb),rinv00);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vgb = _mm256_andnot_ps(dummy_mask,vgb);
+ vgbsum = _mm256_add_ps(vgbsum,vgb);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 92 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vgbsum,kernel_data->energygrp_polarization+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+ dvdasum = _mm256_mul_ps(dvdasum, _mm256_mul_ps(isai0,isai0));
+ gmx_mm256_update_1pot_ps(dvdasum,dvda+inr);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 10 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*10 + inneriter*92);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecGB_VdwCSTab_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: GeneralizedBorn
+ * VdW interaction: CubicSplineTable
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ *
+ * NOTE(review): auto-generated by the avx_256_single kernel generator (see the
+ * file header elsewhere in this patch). This is the force-only twin of the VF
+ * kernel above: same loop structure, but the potential accumulators
+ * (velecsum/vgbsum/vvdwsum) are never updated. Edit the generator, not this file.
+ */
+void
+nb_kernel_ElecGB_VdwCSTab_GeomP1P1_F_avx_256_single
+    (t_nblist * gmx_restrict nlist,
+     rvec * gmx_restrict xx,
+     rvec * gmx_restrict ff,
+     t_forcerec * gmx_restrict fr,
+     t_mdatoms * gmx_restrict mdatoms,
+     nb_kernel_data_t * gmx_restrict kernel_data,
+     t_nrnb * gmx_restrict nrnb)
+{
+    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+     * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the four positions in the SIMD register.
+     */
+    int i_shift_offset,i_coord_offset,outeriter,inneriter;
+    int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+    int jnrA,jnrB,jnrC,jnrD;
+    int jnrE,jnrF,jnrG,jnrH;
+    int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+    int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+    int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+    int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+    int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+    real rcutoff_scalar;
+    real *shiftvec,*fshift,*x,*f;
+    /* Scatter targets for per-j force updates; redirected to scratch for padded entries. */
+    real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+    real scratch[4*DIM];
+    __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+    real * vdwioffsetptr0;
+    __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+    int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+    __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+    __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+    __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+    real *charge;
+    __m256i gbitab;
+    __m128i gbitab_lo,gbitab_hi;
+    __m256 vgb,fgb,vgbsum,dvdasum,gbscale,gbtabscale,isaprod,gbqqfactor,gbinvepsdiff,gbeps,dvdatmp;
+    __m256 minushalf = _mm256_set1_ps(-0.5);
+    real *invsqrta,*dvda,*gbtab;
+    int nvdwtype;
+    __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+    int *vdwtype;
+    real *vdwparam;
+    __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+    __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+    __m256i vfitab;
+    __m128i vfitab_lo,vfitab_hi;
+    __m128i ifour = _mm_set1_epi32(4);
+    __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+    real *vftab;
+    __m256 dummy_mask,cutoff_mask;
+    /* Sign bit in every lane; XORing with it negates all eight floats. */
+    __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+    __m256 one = _mm256_set1_ps(1.0);
+    __m256 two = _mm256_set1_ps(2.0);
+    x = xx[0];
+    f = ff[0];
+
+    nri = nlist->nri;
+    iinr = nlist->iinr;
+    jindex = nlist->jindex;
+    jjnr = nlist->jjnr;
+    shiftidx = nlist->shift;
+    gid = nlist->gid;
+    shiftvec = fr->shift_vec[0];
+    fshift = fr->fshift[0];
+    facel = _mm256_set1_ps(fr->epsfac);
+    charge = mdatoms->chargeA;
+    nvdwtype = fr->ntype;
+    vdwparam = fr->nbfp;
+    vdwtype = mdatoms->typeA;
+
+    vftab = kernel_data->table_vdw->data;
+    vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+    invsqrta = fr->invsqrta;
+    dvda = fr->dvda;
+    gbtabscale = _mm256_set1_ps(fr->gbtab.scale);
+    gbtab = fr->gbtab.data;
+    gbinvepsdiff = _mm256_set1_ps((1.0/fr->epsilon_r) - (1.0/fr->gb_epsilon_solvent));
+
+    /* Avoid stupid compiler warnings */
+    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+    j_coord_offsetA = 0;
+    j_coord_offsetB = 0;
+    j_coord_offsetC = 0;
+    j_coord_offsetD = 0;
+    j_coord_offsetE = 0;
+    j_coord_offsetF = 0;
+    j_coord_offsetG = 0;
+    j_coord_offsetH = 0;
+
+    outeriter = 0;
+    inneriter = 0;
+
+    for(iidx=0;iidx<4*DIM;iidx++)
+    {
+        scratch[iidx] = 0.0;
+    }
+
+    /* Start outer loop over neighborlists */
+    for(iidx=0; iidx<nri; iidx++)
+    {
+        /* Load shift vector for this list */
+        i_shift_offset = DIM*shiftidx[iidx];
+
+        /* Load limits for loop over neighbors */
+        j_index_start = jindex[iidx];
+        j_index_end = jindex[iidx+1];
+
+        /* Get outer coordinate index */
+        inr = iinr[iidx];
+        i_coord_offset = DIM*inr;
+
+        /* Load i particle coords and add shift vector */
+        gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+        fix0 = _mm256_setzero_ps();
+        fiy0 = _mm256_setzero_ps();
+        fiz0 = _mm256_setzero_ps();
+
+        /* Load parameters for i particles */
+        iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+        isai0 = _mm256_set1_ps(invsqrta[inr+0]);
+        vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+        dvdasum = _mm256_setzero_ps();
+
+        /* Start inner kernel loop.
+         * Main loop handles 8 j atoms per iteration; the jjnr[jidx+7]>=0 test
+         * stops before padded (negative) entries, which the masked epilogue
+         * below processes. */
+        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+        {
+
+            /* Get j neighbor index, and coordinate index */
+            jnrA = jjnr[jidx];
+            jnrB = jjnr[jidx+1];
+            jnrC = jjnr[jidx+2];
+            jnrD = jjnr[jidx+3];
+            jnrE = jjnr[jidx+4];
+            jnrF = jjnr[jidx+5];
+            jnrG = jjnr[jidx+6];
+            jnrH = jjnr[jidx+7];
+            j_coord_offsetA = DIM*jnrA;
+            j_coord_offsetB = DIM*jnrB;
+            j_coord_offsetC = DIM*jnrC;
+            j_coord_offsetD = DIM*jnrD;
+            j_coord_offsetE = DIM*jnrE;
+            j_coord_offsetF = DIM*jnrF;
+            j_coord_offsetG = DIM*jnrG;
+            j_coord_offsetH = DIM*jnrH;
+
+            /* load j atom coordinates */
+            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+                                                 x+j_coord_offsetC,x+j_coord_offsetD,
+                                                 x+j_coord_offsetE,x+j_coord_offsetF,
+                                                 x+j_coord_offsetG,x+j_coord_offsetH,
+                                                 &jx0,&jy0,&jz0);
+
+            /* Calculate displacement vector */
+            dx00 = _mm256_sub_ps(ix0,jx0);
+            dy00 = _mm256_sub_ps(iy0,jy0);
+            dz00 = _mm256_sub_ps(iz0,jz0);
+
+            /* Calculate squared distance and things based on it */
+            rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+            rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+            /* Load parameters for j particles */
+            jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+                                                  charge+jnrC+0,charge+jnrD+0,
+                                                  charge+jnrE+0,charge+jnrF+0,
+                                                  charge+jnrG+0,charge+jnrH+0);
+            isaj0 = gmx_mm256_load_8real_swizzle_ps(invsqrta+jnrA+0,invsqrta+jnrB+0,
+                                                    invsqrta+jnrC+0,invsqrta+jnrD+0,
+                                                    invsqrta+jnrE+0,invsqrta+jnrF+0,
+                                                    invsqrta+jnrG+0,invsqrta+jnrH+0);
+            vdwjidx0A = 2*vdwtype[jnrA+0];
+            vdwjidx0B = 2*vdwtype[jnrB+0];
+            vdwjidx0C = 2*vdwtype[jnrC+0];
+            vdwjidx0D = 2*vdwtype[jnrD+0];
+            vdwjidx0E = 2*vdwtype[jnrE+0];
+            vdwjidx0F = 2*vdwtype[jnrF+0];
+            vdwjidx0G = 2*vdwtype[jnrG+0];
+            vdwjidx0H = 2*vdwtype[jnrH+0];
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            r00 = _mm256_mul_ps(rsq00,rinv00);
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq00 = _mm256_mul_ps(iq0,jq0);
+            gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+                                            vdwioffsetptr0+vdwjidx0B,
+                                            vdwioffsetptr0+vdwjidx0C,
+                                            vdwioffsetptr0+vdwjidx0D,
+                                            vdwioffsetptr0+vdwjidx0E,
+                                            vdwioffsetptr0+vdwjidx0F,
+                                            vdwioffsetptr0+vdwjidx0G,
+                                            vdwioffsetptr0+vdwjidx0H,
+                                            &c6_00,&c12_00);
+
+            /* Calculate table index by multiplying r with table scale and truncate to integer */
+            rt = _mm256_mul_ps(r00,vftabscale);
+            vfitab = _mm256_cvttps_epi32(rt);
+            vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+            /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+            vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+            vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+            /* VdW table stride is 8 floats per point: 4 (Y,F,G,H) for dispersion,
+             * then 4 for repulsion - hence the shift by 3 here and the +4 offset later. */
+            vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+            vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
+            isaprod = _mm256_mul_ps(isai0,isaj0);
+            gbqqfactor = _mm256_xor_ps(signbit,_mm256_mul_ps(qq00,_mm256_mul_ps(isaprod,gbinvepsdiff)));
+            gbscale = _mm256_mul_ps(isaprod,gbtabscale);
+
+            /* Calculate generalized born table index - this is a separate table from the normal one,
+             * but we use the same procedure by multiplying r with scale and truncating to integer.
+             */
+            rt = _mm256_mul_ps(r00,gbscale);
+            gbitab = _mm256_cvttps_epi32(rt);
+            gbeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+            /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+            gbitab_lo = _mm256_extractf128_si256(gbitab,0x0);
+            gbitab_hi = _mm256_extractf128_si256(gbitab,0x1);
+            gbitab_lo = _mm_slli_epi32(gbitab_lo,2);
+            gbitab_hi = _mm_slli_epi32(gbitab_hi,2);
+            Y = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,0)),
+                                   _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,0)));
+            F = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,1)),
+                                   _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,1)));
+            G = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,2)),
+                                   _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,2)));
+            H = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,3)),
+                                   _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,3)));
+            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+            /* Cubic spline evaluation: V = Y + eps*Fp, F = Fp + eps*(G + 2*H*eps). */
+            Heps = _mm256_mul_ps(gbeps,H);
+            Fp = _mm256_add_ps(F,_mm256_mul_ps(gbeps,_mm256_add_ps(G,Heps)));
+            VV = _mm256_add_ps(Y,_mm256_mul_ps(gbeps,Fp));
+            vgb = _mm256_mul_ps(gbqqfactor,VV);
+
+            FF = _mm256_add_ps(Fp,_mm256_mul_ps(gbeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+            fgb = _mm256_mul_ps(gbqqfactor,_mm256_mul_ps(FF,gbscale));
+            dvdatmp = _mm256_mul_ps(minushalf,_mm256_add_ps(vgb,_mm256_mul_ps(fgb,r00)));
+            dvdasum = _mm256_add_ps(dvdasum,dvdatmp);
+            fjptrA = dvda+jnrA;
+            fjptrB = dvda+jnrB;
+            fjptrC = dvda+jnrC;
+            fjptrD = dvda+jnrD;
+            fjptrE = dvda+jnrE;
+            fjptrF = dvda+jnrF;
+            fjptrG = dvda+jnrG;
+            fjptrH = dvda+jnrH;
+            gmx_mm256_increment_8real_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+                                                 _mm256_mul_ps(dvdatmp,_mm256_mul_ps(isaj0,isaj0)));
+            velec = _mm256_mul_ps(qq00,rinv00);
+            felec = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(velec,rinv00),fgb),rinv00);
+
+            /* CUBIC SPLINE TABLE DISPERSION */
+            Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+            F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+            G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+            H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+            Heps = _mm256_mul_ps(vfeps,H);
+            Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+            FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+            fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+            /* CUBIC SPLINE TABLE REPULSION */
+            vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+            vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+            Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+            F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+            G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+            H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+            Heps = _mm256_mul_ps(vfeps,H);
+            Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+            FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+            fvdw12 = _mm256_mul_ps(c12_00,FF);
+            /* XOR with signbit negates; table force is -(FF6+FF12)*tabscale/r. */
+            fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+            fscal = _mm256_add_ps(felec,fvdw);
+
+            /* Calculate temporary vectorial force */
+            tx = _mm256_mul_ps(fscal,dx00);
+            ty = _mm256_mul_ps(fscal,dy00);
+            tz = _mm256_mul_ps(fscal,dz00);
+
+            /* Update vectorial force */
+            fix0 = _mm256_add_ps(fix0,tx);
+            fiy0 = _mm256_add_ps(fiy0,ty);
+            fiz0 = _mm256_add_ps(fiz0,tz);
+
+            fjptrA = f+j_coord_offsetA;
+            fjptrB = f+j_coord_offsetB;
+            fjptrC = f+j_coord_offsetC;
+            fjptrD = f+j_coord_offsetD;
+            fjptrE = f+j_coord_offsetE;
+            fjptrF = f+j_coord_offsetF;
+            fjptrG = f+j_coord_offsetG;
+            fjptrH = f+j_coord_offsetH;
+            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+            /* Inner loop uses 81 flops */
+        }
+
+        /* Epilogue for the final partial batch: negative jnr entries are padding;
+         * dummy_mask zeroes their contributions and scratch absorbs their writes. */
+        if(jidx<j_index_end)
+        {
+
+            /* Get j neighbor index, and coordinate index */
+            jnrlistA = jjnr[jidx];
+            jnrlistB = jjnr[jidx+1];
+            jnrlistC = jjnr[jidx+2];
+            jnrlistD = jjnr[jidx+3];
+            jnrlistE = jjnr[jidx+4];
+            jnrlistF = jjnr[jidx+5];
+            jnrlistG = jjnr[jidx+6];
+            jnrlistH = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+             */
+            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+            jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+            jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+            jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+            jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+            jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+            jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+            jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+            jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+            j_coord_offsetA = DIM*jnrA;
+            j_coord_offsetB = DIM*jnrB;
+            j_coord_offsetC = DIM*jnrC;
+            j_coord_offsetD = DIM*jnrD;
+            j_coord_offsetE = DIM*jnrE;
+            j_coord_offsetF = DIM*jnrF;
+            j_coord_offsetG = DIM*jnrG;
+            j_coord_offsetH = DIM*jnrH;
+
+            /* load j atom coordinates */
+            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+                                                 x+j_coord_offsetC,x+j_coord_offsetD,
+                                                 x+j_coord_offsetE,x+j_coord_offsetF,
+                                                 x+j_coord_offsetG,x+j_coord_offsetH,
+                                                 &jx0,&jy0,&jz0);
+
+            /* Calculate displacement vector */
+            dx00 = _mm256_sub_ps(ix0,jx0);
+            dy00 = _mm256_sub_ps(iy0,jy0);
+            dz00 = _mm256_sub_ps(iz0,jz0);
+
+            /* Calculate squared distance and things based on it */
+            rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+            rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+            /* Load parameters for j particles */
+            jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+                                                  charge+jnrC+0,charge+jnrD+0,
+                                                  charge+jnrE+0,charge+jnrF+0,
+                                                  charge+jnrG+0,charge+jnrH+0);
+            isaj0 = gmx_mm256_load_8real_swizzle_ps(invsqrta+jnrA+0,invsqrta+jnrB+0,
+                                                    invsqrta+jnrC+0,invsqrta+jnrD+0,
+                                                    invsqrta+jnrE+0,invsqrta+jnrF+0,
+                                                    invsqrta+jnrG+0,invsqrta+jnrH+0);
+            vdwjidx0A = 2*vdwtype[jnrA+0];
+            vdwjidx0B = 2*vdwtype[jnrB+0];
+            vdwjidx0C = 2*vdwtype[jnrC+0];
+            vdwjidx0D = 2*vdwtype[jnrD+0];
+            vdwjidx0E = 2*vdwtype[jnrE+0];
+            vdwjidx0F = 2*vdwtype[jnrF+0];
+            vdwjidx0G = 2*vdwtype[jnrG+0];
+            vdwjidx0H = 2*vdwtype[jnrH+0];
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            r00 = _mm256_mul_ps(rsq00,rinv00);
+            /* Zero r for padded entries so the table lookups below use index 0. */
+            r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq00 = _mm256_mul_ps(iq0,jq0);
+            gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+                                            vdwioffsetptr0+vdwjidx0B,
+                                            vdwioffsetptr0+vdwjidx0C,
+                                            vdwioffsetptr0+vdwjidx0D,
+                                            vdwioffsetptr0+vdwjidx0E,
+                                            vdwioffsetptr0+vdwjidx0F,
+                                            vdwioffsetptr0+vdwjidx0G,
+                                            vdwioffsetptr0+vdwjidx0H,
+                                            &c6_00,&c12_00);
+
+            /* Calculate table index by multiplying r with table scale and truncate to integer */
+            rt = _mm256_mul_ps(r00,vftabscale);
+            vfitab = _mm256_cvttps_epi32(rt);
+            vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+            /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+            vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+            vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+            vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+            vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
+            isaprod = _mm256_mul_ps(isai0,isaj0);
+            gbqqfactor = _mm256_xor_ps(signbit,_mm256_mul_ps(qq00,_mm256_mul_ps(isaprod,gbinvepsdiff)));
+            gbscale = _mm256_mul_ps(isaprod,gbtabscale);
+
+            /* Calculate generalized born table index - this is a separate table from the normal one,
+             * but we use the same procedure by multiplying r with scale and truncating to integer.
+             */
+            rt = _mm256_mul_ps(r00,gbscale);
+            gbitab = _mm256_cvttps_epi32(rt);
+            gbeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+            /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+            gbitab_lo = _mm256_extractf128_si256(gbitab,0x0);
+            gbitab_hi = _mm256_extractf128_si256(gbitab,0x1);
+            gbitab_lo = _mm_slli_epi32(gbitab_lo,2);
+            gbitab_hi = _mm_slli_epi32(gbitab_hi,2);
+            Y = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,0)),
+                                   _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,0)));
+            F = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,1)),
+                                   _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,1)));
+            G = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,2)),
+                                   _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,2)));
+            H = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,3)),
+                                   _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,3)));
+            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+            Heps = _mm256_mul_ps(gbeps,H);
+            Fp = _mm256_add_ps(F,_mm256_mul_ps(gbeps,_mm256_add_ps(G,Heps)));
+            VV = _mm256_add_ps(Y,_mm256_mul_ps(gbeps,Fp));
+            vgb = _mm256_mul_ps(gbqqfactor,VV);
+
+            FF = _mm256_add_ps(Fp,_mm256_mul_ps(gbeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+            fgb = _mm256_mul_ps(gbqqfactor,_mm256_mul_ps(FF,gbscale));
+            dvdatmp = _mm256_mul_ps(minushalf,_mm256_add_ps(vgb,_mm256_mul_ps(fgb,r00)));
+            dvdasum = _mm256_add_ps(dvdasum,dvdatmp);
+            /* The pointers to scratch make sure that this code with compilers that take gmx_restrict seriously (e.g. icc 13) really can't screw things up. */
+            fjptrA = (jnrlistA>=0) ? dvda+jnrA : scratch;
+            fjptrB = (jnrlistB>=0) ? dvda+jnrB : scratch;
+            fjptrC = (jnrlistC>=0) ? dvda+jnrC : scratch;
+            fjptrD = (jnrlistD>=0) ? dvda+jnrD : scratch;
+            fjptrE = (jnrlistE>=0) ? dvda+jnrE : scratch;
+            fjptrF = (jnrlistF>=0) ? dvda+jnrF : scratch;
+            fjptrG = (jnrlistG>=0) ? dvda+jnrG : scratch;
+            fjptrH = (jnrlistH>=0) ? dvda+jnrH : scratch;
+            gmx_mm256_increment_8real_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+                                                 _mm256_mul_ps(dvdatmp,_mm256_mul_ps(isaj0,isaj0)));
+            velec = _mm256_mul_ps(qq00,rinv00);
+            felec = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(velec,rinv00),fgb),rinv00);
+
+            /* CUBIC SPLINE TABLE DISPERSION */
+            Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+            F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+            G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+            H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+            Heps = _mm256_mul_ps(vfeps,H);
+            Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+            FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+            fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+            /* CUBIC SPLINE TABLE REPULSION */
+            vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+            vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+            Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+            F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+            G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+            H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+            Heps = _mm256_mul_ps(vfeps,H);
+            Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+            FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+            fvdw12 = _mm256_mul_ps(c12_00,FF);
+            fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+            fscal = _mm256_add_ps(felec,fvdw);
+
+            /* Clear force contributions from padded (dummy) lanes. */
+            fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+            /* Calculate temporary vectorial force */
+            tx = _mm256_mul_ps(fscal,dx00);
+            ty = _mm256_mul_ps(fscal,dy00);
+            tz = _mm256_mul_ps(fscal,dz00);
+
+            /* Update vectorial force */
+            fix0 = _mm256_add_ps(fix0,tx);
+            fiy0 = _mm256_add_ps(fiy0,ty);
+            fiz0 = _mm256_add_ps(fiz0,tz);
+
+            fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+            fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+            fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+            fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+            fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+            fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+            fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+            fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+            /* Inner loop uses 82 flops */
+        }
+
+        /* End of innermost loop */
+
+        gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+                                                 f+i_coord_offset,fshift+i_shift_offset);
+
+        dvdasum = _mm256_mul_ps(dvdasum, _mm256_mul_ps(isai0,isai0));
+        gmx_mm256_update_1pot_ps(dvdasum,dvda+inr);
+
+        /* Increment number of inner iterations */
+        inneriter                  += j_index_end - j_index_start;
+
+        /* Outer loop uses 7 flops */
+    }
+
+    /* Increment number of outer iterations */
+    outeriter        += nri;
+
+    /* Update outer/inner flops */
+
+    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*82);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecGB_VdwLJ_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: GeneralizedBorn
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecGB_VdwLJ_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i gbitab;
+ __m128i gbitab_lo,gbitab_hi;
+ __m256 vgb,fgb,vgbsum,dvdasum,gbscale,gbtabscale,isaprod,gbqqfactor,gbinvepsdiff,gbeps,dvdatmp;
+ __m256 minushalf = _mm256_set1_ps(-0.5);
+ real *invsqrta,*dvda,*gbtab;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ invsqrta = fr->invsqrta;
+ dvda = fr->dvda;
+ gbtabscale = _mm256_set1_ps(fr->gbtab.scale);
+ gbtab = fr->gbtab.data;
+ gbinvepsdiff = _mm256_set1_ps((1.0/fr->epsilon_r) - (1.0/fr->gb_epsilon_solvent));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ isai0 = _mm256_set1_ps(invsqrta[inr+0]);
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vgbsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+ dvdasum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ isaj0 = gmx_mm256_load_8real_swizzle_ps(invsqrta+jnrA+0,invsqrta+jnrB+0,
+ invsqrta+jnrC+0,invsqrta+jnrD+0,
+ invsqrta+jnrE+0,invsqrta+jnrF+0,
+ invsqrta+jnrG+0,invsqrta+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
+ isaprod = _mm256_mul_ps(isai0,isaj0);
+ gbqqfactor = _mm256_xor_ps(signbit,_mm256_mul_ps(qq00,_mm256_mul_ps(isaprod,gbinvepsdiff)));
+ gbscale = _mm256_mul_ps(isaprod,gbtabscale);
+
+ /* Calculate generalized born table index - this is a separate table from the normal one,
+ * but we use the same procedure by multiplying r with scale and truncating to integer.
+ */
+ rt = _mm256_mul_ps(r00,gbscale);
+ gbitab = _mm256_cvttps_epi32(rt);
+ gbeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ gbitab_lo = _mm256_extractf128_si256(gbitab,0x0);
+ gbitab_hi = _mm256_extractf128_si256(gbitab,0x1);
+ gbitab_lo = _mm_slli_epi32(gbitab_lo,2);
+ gbitab_hi = _mm_slli_epi32(gbitab_hi,2);
+ Y = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,0)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,1)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,2)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,3)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(gbeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(gbeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(gbeps,Fp));
+ vgb = _mm256_mul_ps(gbqqfactor,VV);
+
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(gbeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fgb = _mm256_mul_ps(gbqqfactor,_mm256_mul_ps(FF,gbscale));
+ dvdatmp = _mm256_mul_ps(minushalf,_mm256_add_ps(vgb,_mm256_mul_ps(fgb,r00)));
+ dvdasum = _mm256_add_ps(dvdasum,dvdatmp);
+ fjptrA = dvda+jnrA;
+ fjptrB = dvda+jnrB;
+ fjptrC = dvda+jnrC;
+ fjptrD = dvda+jnrD;
+ fjptrE = dvda+jnrE;
+ fjptrF = dvda+jnrF;
+ fjptrG = dvda+jnrG;
+ fjptrH = dvda+jnrH;
+ gmx_mm256_increment_8real_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ _mm256_mul_ps(dvdatmp,_mm256_mul_ps(isaj0,isaj0)));
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(velec,rinv00),fgb),rinv00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vgbsum = _mm256_add_ps(vgbsum,vgb);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 70 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ isaj0 = gmx_mm256_load_8real_swizzle_ps(invsqrta+jnrA+0,invsqrta+jnrB+0,
+ invsqrta+jnrC+0,invsqrta+jnrD+0,
+ invsqrta+jnrE+0,invsqrta+jnrF+0,
+ invsqrta+jnrG+0,invsqrta+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
+ isaprod = _mm256_mul_ps(isai0,isaj0);
+ gbqqfactor = _mm256_xor_ps(signbit,_mm256_mul_ps(qq00,_mm256_mul_ps(isaprod,gbinvepsdiff)));
+ gbscale = _mm256_mul_ps(isaprod,gbtabscale);
+
+ /* Calculate generalized born table index - this is a separate table from the normal one,
+ * but we use the same procedure by multiplying r with scale and truncating to integer.
+ */
+ rt = _mm256_mul_ps(r00,gbscale);
+ gbitab = _mm256_cvttps_epi32(rt);
+ gbeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ gbitab_lo = _mm256_extractf128_si256(gbitab,0x0);
+ gbitab_hi = _mm256_extractf128_si256(gbitab,0x1);
+ gbitab_lo = _mm_slli_epi32(gbitab_lo,2);
+ gbitab_hi = _mm_slli_epi32(gbitab_hi,2);
+ Y = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,0)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,1)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,2)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,3)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(gbeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(gbeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(gbeps,Fp));
+ vgb = _mm256_mul_ps(gbqqfactor,VV);
+
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(gbeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fgb = _mm256_mul_ps(gbqqfactor,_mm256_mul_ps(FF,gbscale));
+ dvdatmp = _mm256_mul_ps(minushalf,_mm256_add_ps(vgb,_mm256_mul_ps(fgb,r00)));
+ dvdasum = _mm256_add_ps(dvdasum,dvdatmp);
+ /* The pointers to scratch make sure that this code with compilers that take gmx_restrict seriously (e.g. icc 13) really can't screw things up. */
+ fjptrA = (jnrlistA>=0) ? dvda+jnrA : scratch;
+ fjptrB = (jnrlistB>=0) ? dvda+jnrB : scratch;
+ fjptrC = (jnrlistC>=0) ? dvda+jnrC : scratch;
+ fjptrD = (jnrlistD>=0) ? dvda+jnrD : scratch;
+ fjptrE = (jnrlistE>=0) ? dvda+jnrE : scratch;
+ fjptrF = (jnrlistF>=0) ? dvda+jnrF : scratch;
+ fjptrG = (jnrlistG>=0) ? dvda+jnrG : scratch;
+ fjptrH = (jnrlistH>=0) ? dvda+jnrH : scratch;
+ gmx_mm256_increment_8real_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ _mm256_mul_ps(dvdatmp,_mm256_mul_ps(isaj0,isaj0)));
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(velec,rinv00),fgb),rinv00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vgb = _mm256_andnot_ps(dummy_mask,vgb);
+ vgbsum = _mm256_add_ps(vgbsum,vgb);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 71 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vgbsum,kernel_data->energygrp_polarization+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+ dvdasum = _mm256_mul_ps(dvdasum, _mm256_mul_ps(isai0,isai0));
+ gmx_mm256_update_1pot_ps(dvdasum,dvda+inr);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 10 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*10 + inneriter*71);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecGB_VdwLJ_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: GeneralizedBorn
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecGB_VdwLJ_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i gbitab;
+ __m128i gbitab_lo,gbitab_hi;
+ __m256 vgb,fgb,vgbsum,dvdasum,gbscale,gbtabscale,isaprod,gbqqfactor,gbinvepsdiff,gbeps,dvdatmp;
+ __m256 minushalf = _mm256_set1_ps(-0.5);
+ real *invsqrta,*dvda,*gbtab;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ invsqrta = fr->invsqrta;
+ dvda = fr->dvda;
+ gbtabscale = _mm256_set1_ps(fr->gbtab.scale);
+ gbtab = fr->gbtab.data;
+ gbinvepsdiff = _mm256_set1_ps((1.0/fr->epsilon_r) - (1.0/fr->gb_epsilon_solvent));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ isai0 = _mm256_set1_ps(invsqrta[inr+0]);
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ dvdasum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ isaj0 = gmx_mm256_load_8real_swizzle_ps(invsqrta+jnrA+0,invsqrta+jnrB+0,
+ invsqrta+jnrC+0,invsqrta+jnrD+0,
+ invsqrta+jnrE+0,invsqrta+jnrF+0,
+ invsqrta+jnrG+0,invsqrta+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
+ isaprod = _mm256_mul_ps(isai0,isaj0);
+ gbqqfactor = _mm256_xor_ps(signbit,_mm256_mul_ps(qq00,_mm256_mul_ps(isaprod,gbinvepsdiff)));
+ gbscale = _mm256_mul_ps(isaprod,gbtabscale);
+
+ /* Calculate generalized born table index - this is a separate table from the normal one,
+ * but we use the same procedure by multiplying r with scale and truncating to integer.
+ */
+ rt = _mm256_mul_ps(r00,gbscale);
+ gbitab = _mm256_cvttps_epi32(rt);
+ gbeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ gbitab_lo = _mm256_extractf128_si256(gbitab,0x0);
+ gbitab_hi = _mm256_extractf128_si256(gbitab,0x1);
+ gbitab_lo = _mm_slli_epi32(gbitab_lo,2);
+ gbitab_hi = _mm_slli_epi32(gbitab_hi,2);
+ Y = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,0)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,1)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,2)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,3)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(gbeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(gbeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(gbeps,Fp));
+ vgb = _mm256_mul_ps(gbqqfactor,VV);
+
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(gbeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fgb = _mm256_mul_ps(gbqqfactor,_mm256_mul_ps(FF,gbscale));
+ dvdatmp = _mm256_mul_ps(minushalf,_mm256_add_ps(vgb,_mm256_mul_ps(fgb,r00)));
+ dvdasum = _mm256_add_ps(dvdasum,dvdatmp);
+ fjptrA = dvda+jnrA;
+ fjptrB = dvda+jnrB;
+ fjptrC = dvda+jnrC;
+ fjptrD = dvda+jnrD;
+ fjptrE = dvda+jnrE;
+ fjptrF = dvda+jnrF;
+ fjptrG = dvda+jnrG;
+ fjptrH = dvda+jnrH;
+ gmx_mm256_increment_8real_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ _mm256_mul_ps(dvdatmp,_mm256_mul_ps(isaj0,isaj0)));
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(velec,rinv00),fgb),rinv00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 63 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ isaj0 = gmx_mm256_load_8real_swizzle_ps(invsqrta+jnrA+0,invsqrta+jnrB+0,
+ invsqrta+jnrC+0,invsqrta+jnrD+0,
+ invsqrta+jnrE+0,invsqrta+jnrF+0,
+ invsqrta+jnrG+0,invsqrta+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
+ isaprod = _mm256_mul_ps(isai0,isaj0);
+ gbqqfactor = _mm256_xor_ps(signbit,_mm256_mul_ps(qq00,_mm256_mul_ps(isaprod,gbinvepsdiff)));
+ gbscale = _mm256_mul_ps(isaprod,gbtabscale);
+
+ /* Calculate generalized born table index - this is a separate table from the normal one,
+ * but we use the same procedure by multiplying r with scale and truncating to integer.
+ */
+ rt = _mm256_mul_ps(r00,gbscale);
+ gbitab = _mm256_cvttps_epi32(rt);
+ gbeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ gbitab_lo = _mm256_extractf128_si256(gbitab,0x0);
+ gbitab_hi = _mm256_extractf128_si256(gbitab,0x1);
+ gbitab_lo = _mm_slli_epi32(gbitab_lo,2);
+ gbitab_hi = _mm_slli_epi32(gbitab_hi,2);
+ Y = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,0)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,1)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,2)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,3)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(gbeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(gbeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(gbeps,Fp));
+ vgb = _mm256_mul_ps(gbqqfactor,VV);
+
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(gbeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fgb = _mm256_mul_ps(gbqqfactor,_mm256_mul_ps(FF,gbscale));
+ dvdatmp = _mm256_mul_ps(minushalf,_mm256_add_ps(vgb,_mm256_mul_ps(fgb,r00)));
+ dvdasum = _mm256_add_ps(dvdasum,dvdatmp);
+ /* The pointers to scratch make sure that this code with compilers that take gmx_restrict seriously (e.g. icc 13) really can't screw things up. */
+ fjptrA = (jnrlistA>=0) ? dvda+jnrA : scratch;
+ fjptrB = (jnrlistB>=0) ? dvda+jnrB : scratch;
+ fjptrC = (jnrlistC>=0) ? dvda+jnrC : scratch;
+ fjptrD = (jnrlistD>=0) ? dvda+jnrD : scratch;
+ fjptrE = (jnrlistE>=0) ? dvda+jnrE : scratch;
+ fjptrF = (jnrlistF>=0) ? dvda+jnrF : scratch;
+ fjptrG = (jnrlistG>=0) ? dvda+jnrG : scratch;
+ fjptrH = (jnrlistH>=0) ? dvda+jnrH : scratch;
+ gmx_mm256_increment_8real_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ _mm256_mul_ps(dvdatmp,_mm256_mul_ps(isaj0,isaj0)));
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(velec,rinv00),fgb),rinv00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 64 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ dvdasum = _mm256_mul_ps(dvdasum, _mm256_mul_ps(isai0,isai0));
+ gmx_mm256_update_1pot_ps(dvdasum,dvda+inr);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*64);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecGB_VdwNone_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: GeneralizedBorn
+ * VdW interaction: None
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecGB_VdwNone_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256i gbitab;
+ __m128i gbitab_lo,gbitab_hi;
+ __m256 vgb,fgb,vgbsum,dvdasum,gbscale,gbtabscale,isaprod,gbqqfactor,gbinvepsdiff,gbeps,dvdatmp;
+ __m256 minushalf = _mm256_set1_ps(-0.5);
+ real *invsqrta,*dvda,*gbtab;
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+
+ invsqrta = fr->invsqrta;
+ dvda = fr->dvda;
+ gbtabscale = _mm256_set1_ps(fr->gbtab.scale);
+ gbtab = fr->gbtab.data;
+ gbinvepsdiff = _mm256_set1_ps((1.0/fr->epsilon_r) - (1.0/fr->gb_epsilon_solvent));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ isai0 = _mm256_set1_ps(invsqrta[inr+0]);
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vgbsum = _mm256_setzero_ps();
+ dvdasum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ isaj0 = gmx_mm256_load_8real_swizzle_ps(invsqrta+jnrA+0,invsqrta+jnrB+0,
+ invsqrta+jnrC+0,invsqrta+jnrD+0,
+ invsqrta+jnrE+0,invsqrta+jnrF+0,
+ invsqrta+jnrG+0,invsqrta+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
+ isaprod = _mm256_mul_ps(isai0,isaj0);
+ gbqqfactor = _mm256_xor_ps(signbit,_mm256_mul_ps(qq00,_mm256_mul_ps(isaprod,gbinvepsdiff)));
+ gbscale = _mm256_mul_ps(isaprod,gbtabscale);
+
+ /* Calculate generalized born table index - this is a separate table from the normal one,
+ * but we use the same procedure by multiplying r with scale and truncating to integer.
+ */
+ rt = _mm256_mul_ps(r00,gbscale);
+ gbitab = _mm256_cvttps_epi32(rt);
+ gbeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ gbitab_lo = _mm256_extractf128_si256(gbitab,0x0);
+ gbitab_hi = _mm256_extractf128_si256(gbitab,0x1);
+ gbitab_lo = _mm_slli_epi32(gbitab_lo,2);
+ gbitab_hi = _mm_slli_epi32(gbitab_hi,2);
+ Y = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,0)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,1)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,2)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,3)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(gbeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(gbeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(gbeps,Fp));
+ vgb = _mm256_mul_ps(gbqqfactor,VV);
+
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(gbeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fgb = _mm256_mul_ps(gbqqfactor,_mm256_mul_ps(FF,gbscale));
+ dvdatmp = _mm256_mul_ps(minushalf,_mm256_add_ps(vgb,_mm256_mul_ps(fgb,r00)));
+ dvdasum = _mm256_add_ps(dvdasum,dvdatmp);
+ fjptrA = dvda+jnrA;
+ fjptrB = dvda+jnrB;
+ fjptrC = dvda+jnrC;
+ fjptrD = dvda+jnrD;
+ fjptrE = dvda+jnrE;
+ fjptrF = dvda+jnrF;
+ fjptrG = dvda+jnrG;
+ fjptrH = dvda+jnrH;
+ gmx_mm256_increment_8real_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ _mm256_mul_ps(dvdatmp,_mm256_mul_ps(isaj0,isaj0)));
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(velec,rinv00),fgb),rinv00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vgbsum = _mm256_add_ps(vgbsum,vgb);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 57 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ isaj0 = gmx_mm256_load_8real_swizzle_ps(invsqrta+jnrA+0,invsqrta+jnrB+0,
+ invsqrta+jnrC+0,invsqrta+jnrD+0,
+ invsqrta+jnrE+0,invsqrta+jnrF+0,
+ invsqrta+jnrG+0,invsqrta+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
+ isaprod = _mm256_mul_ps(isai0,isaj0);
+ gbqqfactor = _mm256_xor_ps(signbit,_mm256_mul_ps(qq00,_mm256_mul_ps(isaprod,gbinvepsdiff)));
+ gbscale = _mm256_mul_ps(isaprod,gbtabscale);
+
+ /* Calculate generalized born table index - this is a separate table from the normal one,
+ * but we use the same procedure by multiplying r with scale and truncating to integer.
+ */
+ rt = _mm256_mul_ps(r00,gbscale);
+ gbitab = _mm256_cvttps_epi32(rt);
+ gbeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ gbitab_lo = _mm256_extractf128_si256(gbitab,0x0);
+ gbitab_hi = _mm256_extractf128_si256(gbitab,0x1);
+ gbitab_lo = _mm_slli_epi32(gbitab_lo,2);
+ gbitab_hi = _mm_slli_epi32(gbitab_hi,2);
+ Y = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,0)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,1)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,2)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,3)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(gbeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(gbeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(gbeps,Fp));
+ vgb = _mm256_mul_ps(gbqqfactor,VV);
+
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(gbeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fgb = _mm256_mul_ps(gbqqfactor,_mm256_mul_ps(FF,gbscale));
+ dvdatmp = _mm256_mul_ps(minushalf,_mm256_add_ps(vgb,_mm256_mul_ps(fgb,r00)));
+ dvdasum = _mm256_add_ps(dvdasum,dvdatmp);
+ /* The pointers to scratch make sure that this code with compilers that take gmx_restrict seriously (e.g. icc 13) really can't screw things up. */
+ fjptrA = (jnrlistA>=0) ? dvda+jnrA : scratch;
+ fjptrB = (jnrlistB>=0) ? dvda+jnrB : scratch;
+ fjptrC = (jnrlistC>=0) ? dvda+jnrC : scratch;
+ fjptrD = (jnrlistD>=0) ? dvda+jnrD : scratch;
+ fjptrE = (jnrlistE>=0) ? dvda+jnrE : scratch;
+ fjptrF = (jnrlistF>=0) ? dvda+jnrF : scratch;
+ fjptrG = (jnrlistG>=0) ? dvda+jnrG : scratch;
+ fjptrH = (jnrlistH>=0) ? dvda+jnrH : scratch;
+ gmx_mm256_increment_8real_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ _mm256_mul_ps(dvdatmp,_mm256_mul_ps(isaj0,isaj0)));
+ velec = _mm256_mul_ps(qq00,rinv00);
+ felec = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(velec,rinv00),fgb),rinv00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vgb = _mm256_andnot_ps(dummy_mask,vgb);
+ vgbsum = _mm256_add_ps(vgbsum,vgb);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 58 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vgbsum,kernel_data->energygrp_polarization+ggid);
+ dvdasum = _mm256_mul_ps(dvdasum, _mm256_mul_ps(isai0,isai0));
+ gmx_mm256_update_1pot_ps(dvdasum,dvda+inr);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 9 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VF,outeriter*9 + inneriter*58);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecGB_VdwNone_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: GeneralizedBorn
+ * VdW interaction: None
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecGB_VdwNone_GeomP1P1_F_avx_256_single
+                    (t_nblist * gmx_restrict nlist,
+                     rvec * gmx_restrict xx,
+                     rvec * gmx_restrict ff,
+                     t_forcerec * gmx_restrict fr,
+                     t_mdatoms * gmx_restrict mdatoms,
+                     nb_kernel_data_t * gmx_restrict kernel_data,
+                     t_nrnb * gmx_restrict nrnb)
+{
+    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+     * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the four positions in the SIMD register.
+     */
+    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
+    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+    int              jnrA,jnrB,jnrC,jnrD;
+    int              jnrE,jnrF,jnrG,jnrH;
+    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
+    real             rcutoff_scalar;
+    real             *shiftvec,*fshift,*x,*f;
+    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+    real             scratch[4*DIM];
+    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+    real *           vdwioffsetptr0;
+    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
+    real             *charge;
+    __m256i          gbitab;
+    __m128i          gbitab_lo,gbitab_hi;
+    __m256           vgb,fgb,vgbsum,dvdasum,gbscale,gbtabscale,isaprod,gbqqfactor,gbinvepsdiff,gbeps,dvdatmp;
+    __m256           minushalf = _mm256_set1_ps(-0.5);
+    real             *invsqrta,*dvda,*gbtab;
+    __m256i          vfitab;
+    __m128i          vfitab_lo,vfitab_hi;
+    __m128i          ifour       = _mm_set1_epi32(4);
+    __m256           rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+    real             *vftab;
+    __m256           dummy_mask,cutoff_mask;
+    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+    __m256           one     = _mm256_set1_ps(1.0);
+    __m256           two     = _mm256_set1_ps(2.0);
+    x                = xx[0];
+    f                = ff[0];
+
+    nri              = nlist->nri;
+    iinr             = nlist->iinr;
+    jindex           = nlist->jindex;
+    jjnr             = nlist->jjnr;
+    shiftidx         = nlist->shift;
+    gid              = nlist->gid;
+    shiftvec         = fr->shift_vec[0];
+    fshift           = fr->fshift[0];
+    facel            = _mm256_set1_ps(fr->epsfac);
+    charge           = mdatoms->chargeA;
+
+    invsqrta         = fr->invsqrta;
+    dvda             = fr->dvda;
+    gbtabscale       = _mm256_set1_ps(fr->gbtab.scale);
+    gbtab            = fr->gbtab.data;
+    gbinvepsdiff     = _mm256_set1_ps((1.0/fr->epsilon_r) - (1.0/fr->gb_epsilon_solvent));
+
+    /* Avoid stupid compiler warnings */
+    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+    j_coord_offsetA = 0;
+    j_coord_offsetB = 0;
+    j_coord_offsetC = 0;
+    j_coord_offsetD = 0;
+    j_coord_offsetE = 0;
+    j_coord_offsetF = 0;
+    j_coord_offsetG = 0;
+    j_coord_offsetH = 0;
+
+    outeriter        = 0;
+    inneriter        = 0;
+
+    for(iidx=0;iidx<4*DIM;iidx++)
+    {
+        scratch[iidx] = 0.0;
+    }
+
+    /* Start outer loop over neighborlists */
+    for(iidx=0; iidx<nri; iidx++)
+    {
+        /* Load shift vector for this list */
+        i_shift_offset   = DIM*shiftidx[iidx];
+
+        /* Load limits for loop over neighbors */
+        j_index_start    = jindex[iidx];
+        j_index_end      = jindex[iidx+1];
+
+        /* Get outer coordinate index */
+        inr              = iinr[iidx];
+        i_coord_offset   = DIM*inr;
+
+        /* Load i particle coords and add shift vector */
+        gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+        fix0             = _mm256_setzero_ps();
+        fiy0             = _mm256_setzero_ps();
+        fiz0             = _mm256_setzero_ps();
+
+        /* Load parameters for i particles */
+        iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+        isai0            = _mm256_set1_ps(invsqrta[inr+0]);
+
+        dvdasum          = _mm256_setzero_ps();
+
+        /* Start inner kernel loop */
+        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+        {
+
+            /* Get j neighbor index, and coordinate index */
+            jnrA             = jjnr[jidx];
+            jnrB             = jjnr[jidx+1];
+            jnrC             = jjnr[jidx+2];
+            jnrD             = jjnr[jidx+3];
+            jnrE             = jjnr[jidx+4];
+            jnrF             = jjnr[jidx+5];
+            jnrG             = jjnr[jidx+6];
+            jnrH             = jjnr[jidx+7];
+            j_coord_offsetA  = DIM*jnrA;
+            j_coord_offsetB  = DIM*jnrB;
+            j_coord_offsetC  = DIM*jnrC;
+            j_coord_offsetD  = DIM*jnrD;
+            j_coord_offsetE  = DIM*jnrE;
+            j_coord_offsetF  = DIM*jnrF;
+            j_coord_offsetG  = DIM*jnrG;
+            j_coord_offsetH  = DIM*jnrH;
+
+            /* load j atom coordinates */
+            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+                                                 x+j_coord_offsetC,x+j_coord_offsetD,
+                                                 x+j_coord_offsetE,x+j_coord_offsetF,
+                                                 x+j_coord_offsetG,x+j_coord_offsetH,
+                                                 &jx0,&jy0,&jz0);
+
+            /* Calculate displacement vector */
+            dx00             = _mm256_sub_ps(ix0,jx0);
+            dy00             = _mm256_sub_ps(iy0,jy0);
+            dz00             = _mm256_sub_ps(iz0,jz0);
+
+            /* Calculate squared distance and things based on it */
+            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
+
+            /* Load parameters for j particles */
+            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+                                                               charge+jnrC+0,charge+jnrD+0,
+                                                               charge+jnrE+0,charge+jnrF+0,
+                                                               charge+jnrG+0,charge+jnrH+0);
+            isaj0            = gmx_mm256_load_8real_swizzle_ps(invsqrta+jnrA+0,invsqrta+jnrB+0,
+                                                               invsqrta+jnrC+0,invsqrta+jnrD+0,
+                                                               invsqrta+jnrE+0,invsqrta+jnrF+0,
+                                                               invsqrta+jnrG+0,invsqrta+jnrH+0);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            r00              = _mm256_mul_ps(rsq00,rinv00);
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq00             = _mm256_mul_ps(iq0,jq0);
+
+            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
+            isaprod          = _mm256_mul_ps(isai0,isaj0);
+            gbqqfactor       = _mm256_xor_ps(signbit,_mm256_mul_ps(qq00,_mm256_mul_ps(isaprod,gbinvepsdiff)));
+            gbscale          = _mm256_mul_ps(isaprod,gbtabscale);
+
+            /* Calculate generalized born table index - this is a separate table from the normal one,
+             * but we use the same procedure by multiplying r with scale and truncating to integer.
+             */
+            rt               = _mm256_mul_ps(r00,gbscale);
+            gbitab           = _mm256_cvttps_epi32(rt);
+            gbeps            = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+            /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+            gbitab_lo        = _mm256_extractf128_si256(gbitab,0x0);
+            gbitab_hi        = _mm256_extractf128_si256(gbitab,0x1);
+            gbitab_lo        = _mm_slli_epi32(gbitab_lo,2);
+            gbitab_hi        = _mm_slli_epi32(gbitab_hi,2);
+            Y                = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,0)),
+                                                  _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,0)));
+            F                = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,1)),
+                                                  _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,1)));
+            G                = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,2)),
+                                                  _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,2)));
+            H                = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,3)),
+                                                  _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,3)));
+            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+            Heps             = _mm256_mul_ps(gbeps,H);
+            Fp               = _mm256_add_ps(F,_mm256_mul_ps(gbeps,_mm256_add_ps(G,Heps)));
+            VV               = _mm256_add_ps(Y,_mm256_mul_ps(gbeps,Fp));
+            vgb              = _mm256_mul_ps(gbqqfactor,VV);
+
+            FF               = _mm256_add_ps(Fp,_mm256_mul_ps(gbeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+            fgb              = _mm256_mul_ps(gbqqfactor,_mm256_mul_ps(FF,gbscale));
+            dvdatmp          = _mm256_mul_ps(minushalf,_mm256_add_ps(vgb,_mm256_mul_ps(fgb,r00)));
+            dvdasum          = _mm256_add_ps(dvdasum,dvdatmp);
+            fjptrA           = dvda+jnrA;
+            fjptrB           = dvda+jnrB;
+            fjptrC           = dvda+jnrC;
+            fjptrD           = dvda+jnrD;
+            fjptrE           = dvda+jnrE;
+            fjptrF           = dvda+jnrF;
+            fjptrG           = dvda+jnrG;
+            fjptrH           = dvda+jnrH;
+            gmx_mm256_increment_8real_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+                                                 _mm256_mul_ps(dvdatmp,_mm256_mul_ps(isaj0,isaj0)));
+            velec            = _mm256_mul_ps(qq00,rinv00);
+            felec            = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(velec,rinv00),fgb),rinv00);
+
+            fscal            = felec;
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx00);
+            ty               = _mm256_mul_ps(fscal,dy00);
+            tz               = _mm256_mul_ps(fscal,dz00);
+
+            /* Update vectorial force */
+            fix0             = _mm256_add_ps(fix0,tx);
+            fiy0             = _mm256_add_ps(fiy0,ty);
+            fiz0             = _mm256_add_ps(fiz0,tz);
+
+            fjptrA           = f+j_coord_offsetA;
+            fjptrB           = f+j_coord_offsetB;
+            fjptrC           = f+j_coord_offsetC;
+            fjptrD           = f+j_coord_offsetD;
+            fjptrE           = f+j_coord_offsetE;
+            fjptrF           = f+j_coord_offsetF;
+            fjptrG           = f+j_coord_offsetG;
+            fjptrH           = f+j_coord_offsetH;
+            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+            /* Inner loop uses 55 flops */
+        }
+
+        if(jidx<j_index_end)
+        {
+
+            /* Get j neighbor index, and coordinate index */
+            jnrlistA         = jjnr[jidx];
+            jnrlistB         = jjnr[jidx+1];
+            jnrlistC         = jjnr[jidx+2];
+            jnrlistD         = jjnr[jidx+3];
+            jnrlistE         = jjnr[jidx+4];
+            jnrlistF         = jjnr[jidx+5];
+            jnrlistG         = jjnr[jidx+6];
+            jnrlistH         = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+             */
+            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
+            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
+            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
+            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
+            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
+            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
+            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
+            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
+            j_coord_offsetA  = DIM*jnrA;
+            j_coord_offsetB  = DIM*jnrB;
+            j_coord_offsetC  = DIM*jnrC;
+            j_coord_offsetD  = DIM*jnrD;
+            j_coord_offsetE  = DIM*jnrE;
+            j_coord_offsetF  = DIM*jnrF;
+            j_coord_offsetG  = DIM*jnrG;
+            j_coord_offsetH  = DIM*jnrH;
+
+            /* load j atom coordinates */
+            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+                                                 x+j_coord_offsetC,x+j_coord_offsetD,
+                                                 x+j_coord_offsetE,x+j_coord_offsetF,
+                                                 x+j_coord_offsetG,x+j_coord_offsetH,
+                                                 &jx0,&jy0,&jz0);
+
+            /* Calculate displacement vector */
+            dx00             = _mm256_sub_ps(ix0,jx0);
+            dy00             = _mm256_sub_ps(iy0,jy0);
+            dz00             = _mm256_sub_ps(iz0,jz0);
+
+            /* Calculate squared distance and things based on it */
+            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
+
+            /* Load parameters for j particles */
+            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+                                                               charge+jnrC+0,charge+jnrD+0,
+                                                               charge+jnrE+0,charge+jnrF+0,
+                                                               charge+jnrG+0,charge+jnrH+0);
+            isaj0            = gmx_mm256_load_8real_swizzle_ps(invsqrta+jnrA+0,invsqrta+jnrB+0,
+                                                               invsqrta+jnrC+0,invsqrta+jnrD+0,
+                                                               invsqrta+jnrE+0,invsqrta+jnrF+0,
+                                                               invsqrta+jnrG+0,invsqrta+jnrH+0);
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            r00              = _mm256_mul_ps(rsq00,rinv00);
+            r00              = _mm256_andnot_ps(dummy_mask,r00);
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq00             = _mm256_mul_ps(iq0,jq0);
+
+            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
+            isaprod          = _mm256_mul_ps(isai0,isaj0);
+            gbqqfactor       = _mm256_xor_ps(signbit,_mm256_mul_ps(qq00,_mm256_mul_ps(isaprod,gbinvepsdiff)));
+            gbscale          = _mm256_mul_ps(isaprod,gbtabscale);
+
+            /* Calculate generalized born table index - this is a separate table from the normal one,
+             * but we use the same procedure by multiplying r with scale and truncating to integer.
+             */
+            rt               = _mm256_mul_ps(r00,gbscale);
+            gbitab           = _mm256_cvttps_epi32(rt);
+            gbeps            = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+            /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+            gbitab_lo        = _mm256_extractf128_si256(gbitab,0x0);
+            gbitab_hi        = _mm256_extractf128_si256(gbitab,0x1);
+            gbitab_lo        = _mm_slli_epi32(gbitab_lo,2);
+            gbitab_hi        = _mm_slli_epi32(gbitab_hi,2);
+            Y                = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,0)),
+                                                  _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,0)));
+            F                = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,1)),
+                                                  _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,1)));
+            G                = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,2)),
+                                                  _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,2)));
+            H                = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,3)),
+                                                  _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,3)));
+            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+            Heps             = _mm256_mul_ps(gbeps,H);
+            Fp               = _mm256_add_ps(F,_mm256_mul_ps(gbeps,_mm256_add_ps(G,Heps)));
+            VV               = _mm256_add_ps(Y,_mm256_mul_ps(gbeps,Fp));
+            vgb              = _mm256_mul_ps(gbqqfactor,VV);
+
+            FF               = _mm256_add_ps(Fp,_mm256_mul_ps(gbeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+            fgb              = _mm256_mul_ps(gbqqfactor,_mm256_mul_ps(FF,gbscale));
+            dvdatmp          = _mm256_mul_ps(minushalf,_mm256_add_ps(vgb,_mm256_mul_ps(fgb,r00)));
+            /* Clear dummy (padding) lanes before accumulating, consistent with the
+             * masking applied to fscal below: r00 is zeroed for dummies, which
+             * indexes table entry 0 and would otherwise contribute a nonzero
+             * dvdatmp to dvdasum and to the dvda[] increments.
+             */
+            dvdatmp          = _mm256_andnot_ps(dummy_mask,dvdatmp);
+            dvdasum          = _mm256_add_ps(dvdasum,dvdatmp);
+            /* The pointers to scratch make sure that this code with compilers that take gmx_restrict seriously (e.g. icc 13) really can't screw things up. */
+            fjptrA             = (jnrlistA>=0) ? dvda+jnrA : scratch;
+            fjptrB             = (jnrlistB>=0) ? dvda+jnrB : scratch;
+            fjptrC             = (jnrlistC>=0) ? dvda+jnrC : scratch;
+            fjptrD             = (jnrlistD>=0) ? dvda+jnrD : scratch;
+            fjptrE             = (jnrlistE>=0) ? dvda+jnrE : scratch;
+            fjptrF             = (jnrlistF>=0) ? dvda+jnrF : scratch;
+            fjptrG             = (jnrlistG>=0) ? dvda+jnrG : scratch;
+            fjptrH             = (jnrlistH>=0) ? dvda+jnrH : scratch;
+            gmx_mm256_increment_8real_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+                                                 _mm256_mul_ps(dvdatmp,_mm256_mul_ps(isaj0,isaj0)));
+            velec            = _mm256_mul_ps(qq00,rinv00);
+            felec            = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(velec,rinv00),fgb),rinv00);
+
+            fscal            = felec;
+
+            fscal            = _mm256_andnot_ps(dummy_mask,fscal);
+
+            /* Calculate temporary vectorial force */
+            tx               = _mm256_mul_ps(fscal,dx00);
+            ty               = _mm256_mul_ps(fscal,dy00);
+            tz               = _mm256_mul_ps(fscal,dz00);
+
+            /* Update vectorial force */
+            fix0             = _mm256_add_ps(fix0,tx);
+            fiy0             = _mm256_add_ps(fiy0,ty);
+            fiz0             = _mm256_add_ps(fiz0,tz);
+
+            fjptrA           = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+            fjptrB           = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+            fjptrC           = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+            fjptrD           = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+            fjptrE           = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+            fjptrF           = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+            fjptrG           = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+            fjptrH           = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+            /* Inner loop uses 56 flops */
+        }
+
+        /* End of innermost loop */
+
+        gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+                                                 f+i_coord_offset,fshift+i_shift_offset);
+
+        dvdasum = _mm256_mul_ps(dvdasum, _mm256_mul_ps(isai0,isai0));
+        gmx_mm256_update_1pot_ps(dvdasum,dvda+inr);
+
+        /* Increment number of inner iterations */
+        inneriter                  += j_index_end - j_index_start;
+
+        /* Outer loop uses 7 flops */
+    }
+
+    /* Increment number of outer iterations */
+    outeriter        += nri;
+
+    /* Update outer/inner flops */
+
+    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_F,outeriter*7 + inneriter*56);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecNone_VdwCSTab_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: None
+ * VdW interaction: CubicSplineTable
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecNone_VdwCSTab_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Reset potential sums */
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ /* Load parameters for j particles */
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 56 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ /* Load parameters for j particles */
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 57 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_VDW_VF,outeriter*7 + inneriter*57);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecNone_VdwCSTab_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: None
+ * VdW interaction: CubicSplineTable
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecNone_VdwCSTab_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ /* Load parameters for j particles */
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 48 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ /* Load parameters for j particles */
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 49 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 6 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_VDW_F,outeriter*6 + inneriter*49);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecNone_VdwLJSh_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: None
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecNone_VdwLJSh_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ rcutoff_scalar = fr->rvdw;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Reset potential sums */
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+
+ /* Load parameters for j particles */
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 41 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+
+ /* Load parameters for j particles */
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 41 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_VDW_VF,outeriter*7 + inneriter*41);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecNone_VdwLJSh_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: None
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecNone_VdwLJSh_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ rcutoff_scalar = fr->rvdw;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+
+ /* Load parameters for j particles */
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 30 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+
+ /* Load parameters for j particles */
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 30 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 6 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_VDW_F,outeriter*6 + inneriter*30);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecNone_VdwLJSw_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: None
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecNone_VdwLJSw_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ rcutoff_scalar = fr->rvdw;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rvdw_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Reset potential sums */
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 59 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 60 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_VDW_VF,outeriter*7 + inneriter*60);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecNone_VdwLJSw_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: None
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecNone_VdwLJSw_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ rcutoff_scalar = fr->rvdw;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rvdw_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 56 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 57 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 6 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_VDW_F,outeriter*6 + inneriter*57);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecNone_VdwLJ_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: None
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecNone_VdwLJ_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Reset potential sums */
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+
+ /* Load parameters for j particles */
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 32 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+
+ /* Load parameters for j particles */
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 32 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_VDW_VF,outeriter*7 + inneriter*32);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecNone_VdwLJ_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: None
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+/* Force-only plain Lennard-Jones kernel (no electrostatics), particle-particle
+ * geometry, AVX-256 single precision. The inner loop is unrolled over 8 j
+ * atoms per iteration; a masked epilogue handles the final partial group.
+ * Forces are accumulated into ff and shift forces into fr->fshift; the flop
+ * count is reported through nrnb.
+ */
+void
+nb_kernel_ElecNone_VdwLJ_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* Dump area for j forces of padded (dummy) neighbor-list entries, so the
+ * masked epilogue below can store through a valid pointer unconditionally. */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Start inner kernel loop: full groups of 8 j atoms. The loop condition
+ * jjnr[jidx+7]>=0 stops before any group containing padding entries
+ * (marked by negative indices); those are handled by the masked epilogue. */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+
+ /* Load parameters for j particles */
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ /* fvdw is the scalar force over r: (c12*r^-12 - c6*r^-6)*r^-2.
+ * NOTE(review): the 6/12 prefactors appear to be folded into the
+ * c6/c12 values loaded from fr->nbfp (cf. the one_sixth/one_twelfth
+ * factors used by the energy kernels) - confirm against nbfp setup. */
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 27 flops */
+ }
+
+ /* Masked epilogue: final partial group of up to 8 j atoms, with dummy
+ * (padding) entries neutralized via dummy_mask and scratch stores. */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Dummy entries are redirected to atom 0 so loads stay in bounds */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+
+ /* Load parameters for j particles */
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = fvdw;
+
+ /* Zero the force for dummy (padding) entries */
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ /* Stores for dummy entries are redirected to the scratch buffer */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 27 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 6 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_VDW_F,outeriter*6 + inneriter*27);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+/* Potential-and-force kernel: reaction-field electrostatics with an explicit
+ * cutoff plus cubic-spline-table Lennard-Jones, particle-particle geometry,
+ * AVX-256 single precision. Inner loop unrolled over 8 j atoms with a masked
+ * epilogue for the last partial group. Forces go into ff, shift forces into
+ * fr->fshift, and energies are accumulated per energy group in kernel_data.
+ */
+void
+nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* Dump area for j forces of padded (dummy) neighbor-list entries, so the
+ * masked epilogue below can store through a valid pointer unconditionally. */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop: full groups of 8 j atoms. The loop condition
+ * jjnr[jidx+7]>=0 stops before any group containing padding entries
+ * (marked by negative indices); those are handled by the masked epilogue. */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Skip the whole group if no pair is inside the cutoff */
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ /* Shift by 3: each table point holds 8 floats - Y,F,G,H for
+ * dispersion followed by Y,F,G,H for repulsion (ifour offset below). */
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 72 flops */
+ }
+
+ /* Masked epilogue: final partial group of up to 8 j atoms, with dummy
+ * (padding) entries neutralized via dummy_mask and scratch stores. */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Dummy entries are redirected to atom 0 so loads stay in bounds */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Skip the whole group if no pair is inside the cutoff */
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ /* Zero r for dummy entries to keep the table index in range */
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ /* Shift by 3: each table point holds 8 floats - Y,F,G,H for
+ * dispersion followed by Y,F,G,H for repulsion (ifour offset below). */
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Zero the force for dummy (padding) entries */
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ /* Stores for dummy entries are redirected to the scratch buffer */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 73 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 9 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*73);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 57 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 58 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*58);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwCSTab_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRFCut_VdwCSTab_GeomW3P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 147 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 148 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*148);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwCSTab_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ *
+ * Force-only kernel: accumulates pair forces for a 3-site water (i side)
+ * against single particles (j side), eight j particles per iteration in one
+ * 256-bit AVX register. No potential energies are accumulated, so the spline
+ * table is evaluated only for its derivative (FF), never its value (VV).
+ */
+void
+nb_kernel_ElecRFCut_VdwCSTab_GeomW3P1_F_avx_256_single
+                    (t_nblist * gmx_restrict nlist,
+                     rvec * gmx_restrict xx,
+                     rvec * gmx_restrict ff,
+                     t_forcerec * gmx_restrict fr,
+                     t_mdatoms * gmx_restrict mdatoms,
+                     nb_kernel_data_t * gmx_restrict kernel_data,
+                     t_nrnb * gmx_restrict nrnb)
+{
+    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+     * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+     */
+    int i_shift_offset,i_coord_offset,outeriter,inneriter;
+    int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+    int jnrA,jnrB,jnrC,jnrD;
+    int jnrE,jnrF,jnrG,jnrH;
+    int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+    int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+    int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+    int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+    int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+    real rcutoff_scalar;
+    real *shiftvec,*fshift,*x,*f;
+    real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+    /* scratch catches force output for padded (dummy) j entries in the masked epilogue */
+    real scratch[4*DIM];
+    __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+    real * vdwioffsetptr0;
+    __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+    real * vdwioffsetptr1;
+    __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+    real * vdwioffsetptr2;
+    __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+    int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+    __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+    __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+    __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+    __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+    __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+    real *charge;
+    int nvdwtype;
+    __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+    int *vdwtype;
+    real *vdwparam;
+    __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+    __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+    __m256i vfitab;
+    __m128i vfitab_lo,vfitab_hi;
+    __m128i ifour = _mm_set1_epi32(4);
+    __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+    real *vftab;
+    __m256 dummy_mask,cutoff_mask;
+    __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+    __m256 one = _mm256_set1_ps(1.0);
+    __m256 two = _mm256_set1_ps(2.0);
+    x = xx[0];
+    f = ff[0];
+
+    nri = nlist->nri;
+    iinr = nlist->iinr;
+    jindex = nlist->jindex;
+    jjnr = nlist->jjnr;
+    shiftidx = nlist->shift;
+    gid = nlist->gid;
+    shiftvec = fr->shift_vec[0];
+    fshift = fr->fshift[0];
+    facel = _mm256_set1_ps(fr->epsfac);
+    charge = mdatoms->chargeA;
+    krf = _mm256_set1_ps(fr->ic->k_rf);
+    krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+    crf = _mm256_set1_ps(fr->ic->c_rf);
+    nvdwtype = fr->ntype;
+    vdwparam = fr->nbfp;
+    vdwtype = mdatoms->typeA;
+
+    vftab = kernel_data->table_vdw->data;
+    vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+    /* Setup water-specific parameters: the i water's three charges and the
+     * oxygen's VdW row are loaded once, from the first i entry of the list.
+     */
+    inr = nlist->iinr[0];
+    iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+    iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+    iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+    vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+    rcutoff_scalar = fr->rcoulomb;
+    rcutoff = _mm256_set1_ps(rcutoff_scalar);
+    rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+    /* Avoid stupid compiler warnings */
+    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+    j_coord_offsetA = 0;
+    j_coord_offsetB = 0;
+    j_coord_offsetC = 0;
+    j_coord_offsetD = 0;
+    j_coord_offsetE = 0;
+    j_coord_offsetF = 0;
+    j_coord_offsetG = 0;
+    j_coord_offsetH = 0;
+
+    outeriter = 0;
+    inneriter = 0;
+
+    /* Zero the scratch buffer that dummy j entries will write forces into */
+    for(iidx=0;iidx<4*DIM;iidx++)
+    {
+        scratch[iidx] = 0.0;
+    }
+
+    /* Start outer loop over neighborlists */
+    for(iidx=0; iidx<nri; iidx++)
+    {
+        /* Load shift vector for this list */
+        i_shift_offset = DIM*shiftidx[iidx];
+
+        /* Load limits for loop over neighbors */
+        j_index_start = jindex[iidx];
+        j_index_end = jindex[iidx+1];
+
+        /* Get outer coordinate index */
+        inr = iinr[iidx];
+        i_coord_offset = DIM*inr;
+
+        /* Load i particle coords and add shift vector */
+        gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+        fix0 = _mm256_setzero_ps();
+        fiy0 = _mm256_setzero_ps();
+        fiz0 = _mm256_setzero_ps();
+        fix1 = _mm256_setzero_ps();
+        fiy1 = _mm256_setzero_ps();
+        fiz1 = _mm256_setzero_ps();
+        fix2 = _mm256_setzero_ps();
+        fiy2 = _mm256_setzero_ps();
+        fiz2 = _mm256_setzero_ps();
+
+        /* Start inner kernel loop. The jjnr[jidx+7]>=0 condition stops this
+         * unmasked loop at the first group of 8 containing padded (negative)
+         * j entries; that remainder is handled by the masked epilogue below.
+         */
+        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+        {
+
+            /* Get j neighbor index, and coordinate index */
+            jnrA = jjnr[jidx];
+            jnrB = jjnr[jidx+1];
+            jnrC = jjnr[jidx+2];
+            jnrD = jjnr[jidx+3];
+            jnrE = jjnr[jidx+4];
+            jnrF = jjnr[jidx+5];
+            jnrG = jjnr[jidx+6];
+            jnrH = jjnr[jidx+7];
+            j_coord_offsetA = DIM*jnrA;
+            j_coord_offsetB = DIM*jnrB;
+            j_coord_offsetC = DIM*jnrC;
+            j_coord_offsetD = DIM*jnrD;
+            j_coord_offsetE = DIM*jnrE;
+            j_coord_offsetF = DIM*jnrF;
+            j_coord_offsetG = DIM*jnrG;
+            j_coord_offsetH = DIM*jnrH;
+
+            /* load j atom coordinates */
+            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+                                                 x+j_coord_offsetC,x+j_coord_offsetD,
+                                                 x+j_coord_offsetE,x+j_coord_offsetF,
+                                                 x+j_coord_offsetG,x+j_coord_offsetH,
+                                                 &jx0,&jy0,&jz0);
+
+            /* Calculate displacement vector */
+            dx00 = _mm256_sub_ps(ix0,jx0);
+            dy00 = _mm256_sub_ps(iy0,jy0);
+            dz00 = _mm256_sub_ps(iz0,jz0);
+            dx10 = _mm256_sub_ps(ix1,jx0);
+            dy10 = _mm256_sub_ps(iy1,jy0);
+            dz10 = _mm256_sub_ps(iz1,jz0);
+            dx20 = _mm256_sub_ps(ix2,jx0);
+            dy20 = _mm256_sub_ps(iy2,jy0);
+            dz20 = _mm256_sub_ps(iz2,jz0);
+
+            /* Calculate squared distance and things based on it */
+            rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+            rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+            rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+            rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+            rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+            rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+            rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+            rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+            rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+            /* Load parameters for j particles */
+            jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+                                                  charge+jnrC+0,charge+jnrD+0,
+                                                  charge+jnrE+0,charge+jnrF+0,
+                                                  charge+jnrG+0,charge+jnrH+0);
+            vdwjidx0A = 2*vdwtype[jnrA+0];
+            vdwjidx0B = 2*vdwtype[jnrB+0];
+            vdwjidx0C = 2*vdwtype[jnrC+0];
+            vdwjidx0D = 2*vdwtype[jnrD+0];
+            vdwjidx0E = 2*vdwtype[jnrE+0];
+            vdwjidx0F = 2*vdwtype[jnrF+0];
+            vdwjidx0G = 2*vdwtype[jnrG+0];
+            vdwjidx0H = 2*vdwtype[jnrH+0];
+
+            fjx0 = _mm256_setzero_ps();
+            fjy0 = _mm256_setzero_ps();
+            fjz0 = _mm256_setzero_ps();
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            if (gmx_mm256_any_lt(rsq00,rcutoff2))
+            {
+
+            r00 = _mm256_mul_ps(rsq00,rinv00);
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq00 = _mm256_mul_ps(iq0,jq0);
+            gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+                                            vdwioffsetptr0+vdwjidx0B,
+                                            vdwioffsetptr0+vdwjidx0C,
+                                            vdwioffsetptr0+vdwjidx0D,
+                                            vdwioffsetptr0+vdwjidx0E,
+                                            vdwioffsetptr0+vdwjidx0F,
+                                            vdwioffsetptr0+vdwjidx0G,
+                                            vdwioffsetptr0+vdwjidx0H,
+                                            &c6_00,&c12_00);
+
+            /* Calculate table index by multiplying r with table scale and truncate to integer */
+            rt = _mm256_mul_ps(r00,vftabscale);
+            vfitab = _mm256_cvttps_epi32(rt);
+            vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+            /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+            /* The table stores 8 floats per point: Y,F,G,H for dispersion
+             * followed by Y,F,G,H for repulsion - hence the <<3 here and the
+             * +4 (ifour) offset in the repulsion section below.
+             */
+            vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+            vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+            vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+            vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+            /* REACTION-FIELD ELECTROSTATICS */
+            felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+            /* CUBIC SPLINE TABLE DISPERSION */
+            Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+            F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+            G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+            H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+            Heps = _mm256_mul_ps(vfeps,H);
+            Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+            FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+            fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+            /* CUBIC SPLINE TABLE REPULSION */
+            vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+            vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+            Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+            F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+            G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+            H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+            Heps = _mm256_mul_ps(vfeps,H);
+            Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+            FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+            fvdw12 = _mm256_mul_ps(c12_00,FF);
+            /* xor with signbit negates the scaled table derivative (force = -dV/dr) */
+            fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+            cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+            fscal = _mm256_add_ps(felec,fvdw);
+
+            fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+            /* Calculate temporary vectorial force */
+            tx = _mm256_mul_ps(fscal,dx00);
+            ty = _mm256_mul_ps(fscal,dy00);
+            tz = _mm256_mul_ps(fscal,dz00);
+
+            /* Update vectorial force */
+            fix0 = _mm256_add_ps(fix0,tx);
+            fiy0 = _mm256_add_ps(fiy0,ty);
+            fiz0 = _mm256_add_ps(fiz0,tz);
+
+            fjx0 = _mm256_add_ps(fjx0,tx);
+            fjy0 = _mm256_add_ps(fjy0,ty);
+            fjz0 = _mm256_add_ps(fjz0,tz);
+
+            }
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            if (gmx_mm256_any_lt(rsq10,rcutoff2))
+            {
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq10 = _mm256_mul_ps(iq1,jq0);
+
+            /* REACTION-FIELD ELECTROSTATICS */
+            felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+            cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+            fscal = felec;
+
+            fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+            /* Calculate temporary vectorial force */
+            tx = _mm256_mul_ps(fscal,dx10);
+            ty = _mm256_mul_ps(fscal,dy10);
+            tz = _mm256_mul_ps(fscal,dz10);
+
+            /* Update vectorial force */
+            fix1 = _mm256_add_ps(fix1,tx);
+            fiy1 = _mm256_add_ps(fiy1,ty);
+            fiz1 = _mm256_add_ps(fiz1,tz);
+
+            fjx0 = _mm256_add_ps(fjx0,tx);
+            fjy0 = _mm256_add_ps(fjy0,ty);
+            fjz0 = _mm256_add_ps(fjz0,tz);
+
+            }
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            if (gmx_mm256_any_lt(rsq20,rcutoff2))
+            {
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq20 = _mm256_mul_ps(iq2,jq0);
+
+            /* REACTION-FIELD ELECTROSTATICS */
+            felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+            cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+            fscal = felec;
+
+            fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+            /* Calculate temporary vectorial force */
+            tx = _mm256_mul_ps(fscal,dx20);
+            ty = _mm256_mul_ps(fscal,dy20);
+            tz = _mm256_mul_ps(fscal,dz20);
+
+            /* Update vectorial force */
+            fix2 = _mm256_add_ps(fix2,tx);
+            fiy2 = _mm256_add_ps(fiy2,ty);
+            fiz2 = _mm256_add_ps(fiz2,tz);
+
+            fjx0 = _mm256_add_ps(fjx0,tx);
+            fjy0 = _mm256_add_ps(fjy0,ty);
+            fjz0 = _mm256_add_ps(fjz0,tz);
+
+            }
+
+            fjptrA = f+j_coord_offsetA;
+            fjptrB = f+j_coord_offsetB;
+            fjptrC = f+j_coord_offsetC;
+            fjptrD = f+j_coord_offsetD;
+            fjptrE = f+j_coord_offsetE;
+            fjptrF = f+j_coord_offsetF;
+            fjptrG = f+j_coord_offsetG;
+            fjptrH = f+j_coord_offsetH;
+
+            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+            /* Inner loop uses 120 flops */
+        }
+
+        /* Masked epilogue: handles the final (partial) group of up to eight
+         * j particles, where padded entries are marked by negative indices.
+         */
+        if(jidx<j_index_end)
+        {
+
+            /* Get j neighbor index, and coordinate index */
+            jnrlistA = jjnr[jidx];
+            jnrlistB = jjnr[jidx+1];
+            jnrlistC = jjnr[jidx+2];
+            jnrlistD = jjnr[jidx+3];
+            jnrlistE = jjnr[jidx+4];
+            jnrlistF = jjnr[jidx+5];
+            jnrlistG = jjnr[jidx+6];
+            jnrlistH = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+             */
+            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+            /* Dummy entries are clamped to atom 0 so their loads stay in bounds */
+            jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+            jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+            jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+            jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+            jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+            jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+            jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+            jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+            j_coord_offsetA = DIM*jnrA;
+            j_coord_offsetB = DIM*jnrB;
+            j_coord_offsetC = DIM*jnrC;
+            j_coord_offsetD = DIM*jnrD;
+            j_coord_offsetE = DIM*jnrE;
+            j_coord_offsetF = DIM*jnrF;
+            j_coord_offsetG = DIM*jnrG;
+            j_coord_offsetH = DIM*jnrH;
+
+            /* load j atom coordinates */
+            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+                                                 x+j_coord_offsetC,x+j_coord_offsetD,
+                                                 x+j_coord_offsetE,x+j_coord_offsetF,
+                                                 x+j_coord_offsetG,x+j_coord_offsetH,
+                                                 &jx0,&jy0,&jz0);
+
+            /* Calculate displacement vector */
+            dx00 = _mm256_sub_ps(ix0,jx0);
+            dy00 = _mm256_sub_ps(iy0,jy0);
+            dz00 = _mm256_sub_ps(iz0,jz0);
+            dx10 = _mm256_sub_ps(ix1,jx0);
+            dy10 = _mm256_sub_ps(iy1,jy0);
+            dz10 = _mm256_sub_ps(iz1,jz0);
+            dx20 = _mm256_sub_ps(ix2,jx0);
+            dy20 = _mm256_sub_ps(iy2,jy0);
+            dz20 = _mm256_sub_ps(iz2,jz0);
+
+            /* Calculate squared distance and things based on it */
+            rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+            rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+            rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+            rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+            rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+            rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+            rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+            rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+            rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+            /* Load parameters for j particles */
+            jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+                                                  charge+jnrC+0,charge+jnrD+0,
+                                                  charge+jnrE+0,charge+jnrF+0,
+                                                  charge+jnrG+0,charge+jnrH+0);
+            vdwjidx0A = 2*vdwtype[jnrA+0];
+            vdwjidx0B = 2*vdwtype[jnrB+0];
+            vdwjidx0C = 2*vdwtype[jnrC+0];
+            vdwjidx0D = 2*vdwtype[jnrD+0];
+            vdwjidx0E = 2*vdwtype[jnrE+0];
+            vdwjidx0F = 2*vdwtype[jnrF+0];
+            vdwjidx0G = 2*vdwtype[jnrG+0];
+            vdwjidx0H = 2*vdwtype[jnrH+0];
+
+            fjx0 = _mm256_setzero_ps();
+            fjy0 = _mm256_setzero_ps();
+            fjz0 = _mm256_setzero_ps();
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            if (gmx_mm256_any_lt(rsq00,rcutoff2))
+            {
+
+            r00 = _mm256_mul_ps(rsq00,rinv00);
+            /* Clear r in dummy lanes so their table index becomes 0 (safe) */
+            r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq00 = _mm256_mul_ps(iq0,jq0);
+            gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+                                            vdwioffsetptr0+vdwjidx0B,
+                                            vdwioffsetptr0+vdwjidx0C,
+                                            vdwioffsetptr0+vdwjidx0D,
+                                            vdwioffsetptr0+vdwjidx0E,
+                                            vdwioffsetptr0+vdwjidx0F,
+                                            vdwioffsetptr0+vdwjidx0G,
+                                            vdwioffsetptr0+vdwjidx0H,
+                                            &c6_00,&c12_00);
+
+            /* Calculate table index by multiplying r with table scale and truncate to integer */
+            rt = _mm256_mul_ps(r00,vftabscale);
+            vfitab = _mm256_cvttps_epi32(rt);
+            vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+            /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+            vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+            vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+            vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+            vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+            /* REACTION-FIELD ELECTROSTATICS */
+            felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+            /* CUBIC SPLINE TABLE DISPERSION */
+            Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+            F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+            G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+            H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+            Heps = _mm256_mul_ps(vfeps,H);
+            Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+            FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+            fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+            /* CUBIC SPLINE TABLE REPULSION */
+            vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+            vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+            Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+            F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+            G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+            H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+            GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+            Heps = _mm256_mul_ps(vfeps,H);
+            Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+            FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+            fvdw12 = _mm256_mul_ps(c12_00,FF);
+            fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+            cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+            fscal = _mm256_add_ps(felec,fvdw);
+
+            fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+            fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+            /* Calculate temporary vectorial force */
+            tx = _mm256_mul_ps(fscal,dx00);
+            ty = _mm256_mul_ps(fscal,dy00);
+            tz = _mm256_mul_ps(fscal,dz00);
+
+            /* Update vectorial force */
+            fix0 = _mm256_add_ps(fix0,tx);
+            fiy0 = _mm256_add_ps(fiy0,ty);
+            fiz0 = _mm256_add_ps(fiz0,tz);
+
+            fjx0 = _mm256_add_ps(fjx0,tx);
+            fjy0 = _mm256_add_ps(fjy0,ty);
+            fjz0 = _mm256_add_ps(fjz0,tz);
+
+            }
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            if (gmx_mm256_any_lt(rsq10,rcutoff2))
+            {
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq10 = _mm256_mul_ps(iq1,jq0);
+
+            /* REACTION-FIELD ELECTROSTATICS */
+            felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+            cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+            fscal = felec;
+
+            fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+            fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+            /* Calculate temporary vectorial force */
+            tx = _mm256_mul_ps(fscal,dx10);
+            ty = _mm256_mul_ps(fscal,dy10);
+            tz = _mm256_mul_ps(fscal,dz10);
+
+            /* Update vectorial force */
+            fix1 = _mm256_add_ps(fix1,tx);
+            fiy1 = _mm256_add_ps(fiy1,ty);
+            fiz1 = _mm256_add_ps(fiz1,tz);
+
+            fjx0 = _mm256_add_ps(fjx0,tx);
+            fjy0 = _mm256_add_ps(fjy0,ty);
+            fjz0 = _mm256_add_ps(fjz0,tz);
+
+            }
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            if (gmx_mm256_any_lt(rsq20,rcutoff2))
+            {
+
+            /* Compute parameters for interactions between i and j atoms */
+            qq20 = _mm256_mul_ps(iq2,jq0);
+
+            /* REACTION-FIELD ELECTROSTATICS */
+            felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+            cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+            fscal = felec;
+
+            fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+            fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+            /* Calculate temporary vectorial force */
+            tx = _mm256_mul_ps(fscal,dx20);
+            ty = _mm256_mul_ps(fscal,dy20);
+            tz = _mm256_mul_ps(fscal,dz20);
+
+            /* Update vectorial force */
+            fix2 = _mm256_add_ps(fix2,tx);
+            fiy2 = _mm256_add_ps(fiy2,ty);
+            fiz2 = _mm256_add_ps(fiz2,tz);
+
+            fjx0 = _mm256_add_ps(fjx0,tx);
+            fjy0 = _mm256_add_ps(fjy0,ty);
+            fjz0 = _mm256_add_ps(fjz0,tz);
+
+            }
+
+            /* Forces for dummy entries are redirected to scratch and discarded */
+            fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+            fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+            fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+            fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+            fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+            fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+            fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+            fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+            /* Inner loop uses 121 flops */
+        }
+
+        /* End of innermost loop */
+
+        gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+                                                 f+i_coord_offset,fshift+i_shift_offset);
+
+        /* Increment number of inner iterations */
+        inneriter += j_index_end - j_index_start;
+
+        /* Outer loop uses 18 flops */
+    }
+
+    /* Increment number of outer iterations */
+    outeriter += nri;
+
+    /* Update outer/inner flops */
+
+    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*121);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+    /* Suffixes 0,1,2 refer to particle indices for waters in the inner or outer loop, or
+     * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+     */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_add_ps(rinv01,_mm256_mul_ps(krf,rsq01)),crf));
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_add_ps(rinv02,_mm256_mul_ps(krf,rsq02)),crf));
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 360 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+             */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_add_ps(rinv01,_mm256_mul_ps(krf,rsq01)),crf));
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_add_ps(rinv02,_mm256_mul_ps(krf,rsq02)),crf));
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 361 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*361);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 297 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 298 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*298);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwCSTab_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRFCut_VdwCSTab_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_add_ps(rinv30,_mm256_mul_ps(krf,rsq30)),crf));
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 167 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_add_ps(rinv30,_mm256_mul_ps(krf,rsq30)),crf));
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 168 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*168);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwCSTab_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRFCut_VdwCSTab_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Force-only (F) flavor of the kernel: same structure as the VF kernel above,
+ * but the generator has stripped all potential-energy accumulation (no VV/velecsum/
+ * vvdwsum updates); only forces are computed and reduced.
+ */
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* scratch is a dump site for forces belonging to masked-out (dummy) j atoms
+ * in the epilogue loop; it is zeroed once and its contents are never read back.
+ */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ /* NOTE(review): several of the declarations below (velec, velecsum, vvdw*, one_sixth,
+ * one_twelfth, one, two, ...) are unused in this force-only kernel; the generator
+ * emits a common declaration set for all kernel flavors.
+ */
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ /* AVX1 lacks 256-bit integer ops, so table indices are split into two 128-bit halves */
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ /* Water4 geometry: atom 0 is the VdW site, atoms 1-3 carry the charges.
+ * Charges/types are read once from the first i water since they are
+ * identical for all waters in the list.
+ */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ /* Main loop runs while a full octet of real j atoms is available
+ * (jjnr entries >= 0); the final partial octet, padded with negative
+ * indices, is handled by the masked epilogue loop below.
+ */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* 0-0 pair: tabulated VdW only (site 0 is uncharged in Water4) */
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ /* shift by 3: each table point holds 8 floats (4 dispersion + 4 repulsion) */
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ /* Force-only spline evaluation: FF = F + eps*(2G + 3*Heps); VV is skipped */
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ /* xor with signbit negates: tabulated force is -dV/dr scaled back by 1/r */
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* 1-0 pair: reaction-field electrostatics, skipped if all lanes beyond cutoff */
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 141 flops */
+ }
+
+ /* Epilogue: handles the last partial octet of j atoms. Padding entries
+ * have jnrlist < 0; they are redirected to index 0 for the loads,
+ * neutralized in the arithmetic via dummy_mask, and their forces are
+ * written to the scratch buffer instead of the real force array.
+ */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ /* zero r for dummy lanes so the table index stays in range */
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ /* clear force contributions from dummy lanes */
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /* dummy j forces go to scratch instead of the real force array */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 142 flops */
+ }
+
+ /* End of innermost loop */
+
+ /* Reduce accumulated i forces and shift forces to memory */
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*142);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_add_ps(rinv13,_mm256_mul_ps(krf,rsq13)),crf));
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_add_ps(rinv23,_mm256_mul_ps(krf,rsq23)),crf));
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_add_ps(rinv31,_mm256_mul_ps(krf,rsq31)),crf));
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_add_ps(rinv32,_mm256_mul_ps(krf,rsq32)),crf));
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_add_ps(rinv33,_mm256_mul_ps(krf,rsq33)),crf));
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 387 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_add_ps(rinv13,_mm256_mul_ps(krf,rsq13)),crf));
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_add_ps(rinv23,_mm256_mul_ps(krf,rsq23)),crf));
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_add_ps(rinv31,_mm256_mul_ps(krf,rsq31)),crf));
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_add_ps(rinv32,_mm256_mul_ps(krf,rsq32)),crf));
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_add_ps(rinv33,_mm256_mul_ps(krf,rsq33)),crf));
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 388 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*388);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 324 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 325 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*325);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSh_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ *
+ * Evaluates reaction-field electrostatics (with an explicit cutoff) and
+ * potential-shifted Lennard-Jones for plain particle-particle pairs,
+ * accumulating both potential energies and forces. Eight j atoms are
+ * processed per iteration in one 256-bit AVX single-precision register.
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSh_GeomP1P1_VF_avx_256_single
+                    (t_nblist * gmx_restrict nlist,
+                     rvec * gmx_restrict xx,
+                     rvec * gmx_restrict ff,
+                     t_forcerec * gmx_restrict fr,
+                     t_mdatoms * gmx_restrict mdatoms,
+                     nb_kernel_data_t * gmx_restrict kernel_data,
+                     t_nrnb * gmx_restrict nrnb)
+{
+    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+     * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+     */
+    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
+    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+    int              jnrA,jnrB,jnrC,jnrD;
+    int              jnrE,jnrF,jnrG,jnrH;
+    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
+    real             rcutoff_scalar;
+    real             *shiftvec,*fshift,*x,*f;
+    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+    /* Sink buffer: j-force updates for padded (dummy) neighbor-list entries are
+     * directed here instead of into f[], and then simply discarded. */
+    real             scratch[4*DIM];
+    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+    real *           vdwioffsetptr0;
+    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
+    real             *charge;
+    int              nvdwtype;
+    __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+    int              *vdwtype;
+    real             *vdwparam;
+    __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
+    __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
+    __m256           dummy_mask,cutoff_mask;
+    /* NOTE(review): signbit, one and two (like several variables above, e.g. r00,
+     * fvdw6, fvdw12, rvdw, jidxall) are emitted unconditionally by the kernel
+     * generator and are unused in this particular kernel flavor. */
+    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+    __m256           one     = _mm256_set1_ps(1.0);
+    __m256           two     = _mm256_set1_ps(2.0);
+    x                = xx[0];
+    f                = ff[0];
+
+    nri              = nlist->nri;
+    iinr             = nlist->iinr;
+    jindex           = nlist->jindex;
+    jjnr             = nlist->jjnr;
+    shiftidx         = nlist->shift;
+    gid              = nlist->gid;
+    shiftvec         = fr->shift_vec[0];
+    fshift           = fr->fshift[0];
+    /* facel folds the electrostatic prefactor into the i charge once per i atom */
+    facel            = _mm256_set1_ps(fr->epsfac);
+    charge           = mdatoms->chargeA;
+    krf              = _mm256_set1_ps(fr->ic->k_rf);
+    krf2             = _mm256_set1_ps(fr->ic->k_rf*2.0);
+    crf              = _mm256_set1_ps(fr->ic->c_rf);
+    nvdwtype         = fr->ntype;
+    vdwparam         = fr->nbfp;
+    vdwtype          = mdatoms->typeA;
+
+    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+    rcutoff_scalar   = fr->rcoulomb;
+    rcutoff          = _mm256_set1_ps(rcutoff_scalar);
+    rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
+
+    /* 1/rc^6 used for the potential-shift ("Sh") form of Lennard-Jones */
+    sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
+    rvdw             = _mm256_set1_ps(fr->rvdw);
+
+    /* Avoid stupid compiler warnings */
+    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+    j_coord_offsetA = 0;
+    j_coord_offsetB = 0;
+    j_coord_offsetC = 0;
+    j_coord_offsetD = 0;
+    j_coord_offsetE = 0;
+    j_coord_offsetF = 0;
+    j_coord_offsetG = 0;
+    j_coord_offsetH = 0;
+
+    outeriter        = 0;
+    inneriter        = 0;
+
+    for(iidx=0;iidx<4*DIM;iidx++)
+    {
+        scratch[iidx] = 0.0;
+    }
+
+    /* Start outer loop over neighborlists */
+    for(iidx=0; iidx<nri; iidx++)
+    {
+        /* Load shift vector for this list */
+        i_shift_offset   = DIM*shiftidx[iidx];
+
+        /* Load limits for loop over neighbors */
+        j_index_start    = jindex[iidx];
+        j_index_end      = jindex[iidx+1];
+
+        /* Get outer coordinate index */
+        inr              = iinr[iidx];
+        i_coord_offset   = DIM*inr;
+
+        /* Load i particle coords and add shift vector */
+        gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+        fix0             = _mm256_setzero_ps();
+        fiy0             = _mm256_setzero_ps();
+        fiz0             = _mm256_setzero_ps();
+
+        /* Load parameters for i particles */
+        iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+        /* Reset potential sums */
+        velecsum         = _mm256_setzero_ps();
+        vvdwsum          = _mm256_setzero_ps();
+
+        /* Start inner kernel loop.
+         * Full-width iterations: proceed while all eight j entries are real
+         * (non-negative indices); the padded remainder of the list is handled
+         * by the masked epilogue loop below. */
+        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+        {
+
+            /* Get j neighbor index, and coordinate index */
+            jnrA             = jjnr[jidx];
+            jnrB             = jjnr[jidx+1];
+            jnrC             = jjnr[jidx+2];
+            jnrD             = jjnr[jidx+3];
+            jnrE             = jjnr[jidx+4];
+            jnrF             = jjnr[jidx+5];
+            jnrG             = jjnr[jidx+6];
+            jnrH             = jjnr[jidx+7];
+            j_coord_offsetA  = DIM*jnrA;
+            j_coord_offsetB  = DIM*jnrB;
+            j_coord_offsetC  = DIM*jnrC;
+            j_coord_offsetD  = DIM*jnrD;
+            j_coord_offsetE  = DIM*jnrE;
+            j_coord_offsetF  = DIM*jnrF;
+            j_coord_offsetG  = DIM*jnrG;
+            j_coord_offsetH  = DIM*jnrH;
+
+            /* load j atom coordinates */
+            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+                                                 x+j_coord_offsetC,x+j_coord_offsetD,
+                                                 x+j_coord_offsetE,x+j_coord_offsetF,
+                                                 x+j_coord_offsetG,x+j_coord_offsetH,
+                                                 &jx0,&jy0,&jz0);
+
+            /* Calculate displacement vector */
+            dx00             = _mm256_sub_ps(ix0,jx0);
+            dy00             = _mm256_sub_ps(iy0,jy0);
+            dz00             = _mm256_sub_ps(iz0,jz0);
+
+            /* Calculate squared distance and things based on it */
+            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
+
+            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
+
+            /* Load parameters for j particles */
+            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+                                                                 charge+jnrC+0,charge+jnrD+0,
+                                                                 charge+jnrE+0,charge+jnrF+0,
+                                                                 charge+jnrG+0,charge+jnrH+0);
+            vdwjidx0A        = 2*vdwtype[jnrA+0];
+            vdwjidx0B        = 2*vdwtype[jnrB+0];
+            vdwjidx0C        = 2*vdwtype[jnrC+0];
+            vdwjidx0D        = 2*vdwtype[jnrD+0];
+            vdwjidx0E        = 2*vdwtype[jnrE+0];
+            vdwjidx0F        = 2*vdwtype[jnrF+0];
+            vdwjidx0G        = 2*vdwtype[jnrG+0];
+            vdwjidx0H        = 2*vdwtype[jnrH+0];
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            /* Skip the whole 8-wide batch only if no lane is inside the cutoff */
+            if (gmx_mm256_any_lt(rsq00,rcutoff2))
+            {
+
+                /* Compute parameters for interactions between i and j atoms */
+                qq00             = _mm256_mul_ps(iq0,jq0);
+                gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+                                                vdwioffsetptr0+vdwjidx0B,
+                                                vdwioffsetptr0+vdwjidx0C,
+                                                vdwioffsetptr0+vdwjidx0D,
+                                                vdwioffsetptr0+vdwjidx0E,
+                                                vdwioffsetptr0+vdwjidx0F,
+                                                vdwioffsetptr0+vdwjidx0G,
+                                                vdwioffsetptr0+vdwjidx0H,
+                                                &c6_00,&c12_00);
+
+                /* REACTION-FIELD ELECTROSTATICS */
+                /* V = q*(1/r + krf*r^2 - crf),  F/r = q*(1/r^3 - 2*krf) */
+                velec            = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+                felec            = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+                /* LENNARD-JONES DISPERSION/REPULSION */
+
+                rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+                vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
+                vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+                /* Potential-shifted LJ: subtract the value of each term at the cutoff */
+                vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+                                              _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+                fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+                cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+                /* Update potential sum for this i atom from the interaction with this j atom. */
+                velec            = _mm256_and_ps(velec,cutoff_mask);
+                velecsum         = _mm256_add_ps(velecsum,velec);
+                vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
+                vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
+
+                fscal            = _mm256_add_ps(felec,fvdw);
+
+                fscal            = _mm256_and_ps(fscal,cutoff_mask);
+
+                /* Calculate temporary vectorial force */
+                tx               = _mm256_mul_ps(fscal,dx00);
+                ty               = _mm256_mul_ps(fscal,dy00);
+                tz               = _mm256_mul_ps(fscal,dz00);
+
+                /* Update vectorial force */
+                fix0             = _mm256_add_ps(fix0,tx);
+                fiy0             = _mm256_add_ps(fiy0,ty);
+                fiz0             = _mm256_add_ps(fiz0,tz);
+
+                fjptrA             = f+j_coord_offsetA;
+                fjptrB             = f+j_coord_offsetB;
+                fjptrC             = f+j_coord_offsetC;
+                fjptrD             = f+j_coord_offsetD;
+                fjptrE             = f+j_coord_offsetE;
+                fjptrF             = f+j_coord_offsetF;
+                fjptrG             = f+j_coord_offsetG;
+                fjptrH             = f+j_coord_offsetH;
+                /* Newton's third law: subtract the same force from the j atoms */
+                gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+            }
+
+            /* Inner loop uses 54 flops */
+        }
+
+        /* Masked epilogue for the final (padded) batch of up to eight j atoms */
+        if(jidx<j_index_end)
+        {
+
+            /* Get j neighbor index, and coordinate index */
+            jnrlistA         = jjnr[jidx];
+            jnrlistB         = jjnr[jidx+1];
+            jnrlistC         = jjnr[jidx+2];
+            jnrlistD         = jjnr[jidx+3];
+            jnrlistE         = jjnr[jidx+4];
+            jnrlistF         = jjnr[jidx+5];
+            jnrlistG         = jjnr[jidx+6];
+            jnrlistH         = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+             */
+            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+            /* Clamp dummy indices to 0 so the loads below stay in bounds */
+            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
+            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
+            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
+            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
+            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
+            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
+            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
+            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
+            j_coord_offsetA  = DIM*jnrA;
+            j_coord_offsetB  = DIM*jnrB;
+            j_coord_offsetC  = DIM*jnrC;
+            j_coord_offsetD  = DIM*jnrD;
+            j_coord_offsetE  = DIM*jnrE;
+            j_coord_offsetF  = DIM*jnrF;
+            j_coord_offsetG  = DIM*jnrG;
+            j_coord_offsetH  = DIM*jnrH;
+
+            /* load j atom coordinates */
+            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+                                                 x+j_coord_offsetC,x+j_coord_offsetD,
+                                                 x+j_coord_offsetE,x+j_coord_offsetF,
+                                                 x+j_coord_offsetG,x+j_coord_offsetH,
+                                                 &jx0,&jy0,&jz0);
+
+            /* Calculate displacement vector */
+            dx00             = _mm256_sub_ps(ix0,jx0);
+            dy00             = _mm256_sub_ps(iy0,jy0);
+            dz00             = _mm256_sub_ps(iz0,jz0);
+
+            /* Calculate squared distance and things based on it */
+            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
+
+            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
+
+            /* Load parameters for j particles */
+            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+                                                                 charge+jnrC+0,charge+jnrD+0,
+                                                                 charge+jnrE+0,charge+jnrF+0,
+                                                                 charge+jnrG+0,charge+jnrH+0);
+            vdwjidx0A        = 2*vdwtype[jnrA+0];
+            vdwjidx0B        = 2*vdwtype[jnrB+0];
+            vdwjidx0C        = 2*vdwtype[jnrC+0];
+            vdwjidx0D        = 2*vdwtype[jnrD+0];
+            vdwjidx0E        = 2*vdwtype[jnrE+0];
+            vdwjidx0F        = 2*vdwtype[jnrF+0];
+            vdwjidx0G        = 2*vdwtype[jnrG+0];
+            vdwjidx0H        = 2*vdwtype[jnrH+0];
+
+            /**************************
+             * CALCULATE INTERACTIONS *
+             **************************/
+
+            if (gmx_mm256_any_lt(rsq00,rcutoff2))
+            {
+
+                /* Compute parameters for interactions between i and j atoms */
+                qq00             = _mm256_mul_ps(iq0,jq0);
+                gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+                                                vdwioffsetptr0+vdwjidx0B,
+                                                vdwioffsetptr0+vdwjidx0C,
+                                                vdwioffsetptr0+vdwjidx0D,
+                                                vdwioffsetptr0+vdwjidx0E,
+                                                vdwioffsetptr0+vdwjidx0F,
+                                                vdwioffsetptr0+vdwjidx0G,
+                                                vdwioffsetptr0+vdwjidx0H,
+                                                &c6_00,&c12_00);
+
+                /* REACTION-FIELD ELECTROSTATICS */
+                velec            = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+                felec            = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+                /* LENNARD-JONES DISPERSION/REPULSION */
+
+                rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+                vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
+                vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+                vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+                                              _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+                fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+                cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+                /* Update potential sum for this i atom from the interaction with this j atom.
+                 * Dummy lanes are zeroed with andnot(dummy_mask,...) so padding never
+                 * contributes to energies or forces. */
+                velec            = _mm256_and_ps(velec,cutoff_mask);
+                velec            = _mm256_andnot_ps(dummy_mask,velec);
+                velecsum         = _mm256_add_ps(velecsum,velec);
+                vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
+                vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
+                vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
+
+                fscal            = _mm256_add_ps(felec,fvdw);
+
+                fscal            = _mm256_and_ps(fscal,cutoff_mask);
+
+                fscal            = _mm256_andnot_ps(dummy_mask,fscal);
+
+                /* Calculate temporary vectorial force */
+                tx               = _mm256_mul_ps(fscal,dx00);
+                ty               = _mm256_mul_ps(fscal,dy00);
+                tz               = _mm256_mul_ps(fscal,dz00);
+
+                /* Update vectorial force */
+                fix0             = _mm256_add_ps(fix0,tx);
+                fiy0             = _mm256_add_ps(fiy0,ty);
+                fiz0             = _mm256_add_ps(fiz0,tz);
+
+                /* Route dummy-lane force updates into the scratch sink */
+                fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+                fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+                fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+                fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+                fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+                fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+                fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+                fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+                gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+            }
+
+            /* Inner loop uses 54 flops */
+        }
+
+        /* End of innermost loop */
+
+        gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+                                                 f+i_coord_offset,fshift+i_shift_offset);
+
+        ggid                        = gid[iidx];
+        /* Update potential energies */
+        gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+        gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+        /* Increment number of inner iterations */
+        inneriter                  += j_index_end - j_index_start;
+
+        /* Outer loop uses 9 flops */
+    }
+
+    /* Increment number of outer iterations */
+    outeriter        += nri;
+
+    /* Update outer/inner flops */
+
+    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*54);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSh_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSh_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Force-only kernel: cutoff reaction-field electrostatics plus Lennard-Jones,
+ * particle-particle geometry, processing eight j atoms per iteration with
+ * AVX-256 single precision. Forces are accumulated into ff and fr->fshift;
+ * no potential energies are produced by this variant.
+ */
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* Scratch force sink: padded (dummy) j entries in the masked epilogue below
+ * direct their force output here instead of into memory owned by a real atom. */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* NOTE(review): sh_vdw_invrcut6 and rvdw are set here but never read below -
+ * the LJ potential shift does not enter the force expression in this
+ * force-only kernel. */
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Start inner kernel loop */
+ /* Full-width iterations: run while all eight jjnr entries are real (>=0);
+ * the trailing partial chunk is handled by the masked loop below. */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ /* F/r = qq*(1/r^3 - 2*krf) */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ /* F/r = (c12*r^-12 - c6*r^-6)*r^-2 */
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Zero the force for pairs beyond the cutoff */
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 37 flops */
+ }
+
+ /* Masked epilogue: handle the final (partial) chunk of the j list, where
+ * negative jjnr entries mark padding. */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Dummy entries are clamped to index 0 so loads stay in bounds; their
+ * contributions are cleared via dummy_mask before accumulation. */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Clear force contributions of dummy (padded) entries */
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ /* Dummy entries scatter their (already-zeroed) force into scratch */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 37 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*37);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSh_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSh_GeomW3P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Potential-and-force kernel: cutoff reaction-field electrostatics plus
+ * potential-shifted Lennard-Jones, for a 3-site water (i side, atoms 0-2)
+ * against single particles (j side), eight j atoms per AVX-256 iteration.
+ */
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* Scratch force sink: padded (dummy) j entries in the masked epilogue below
+ * direct their force output here instead of into memory owned by a real atom. */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ /* NOTE(review): charges and the LJ type are loaded once from the first list
+ * entry and reused for every outer iteration - this assumes all i waters
+ * carry identical parameters. Only water atom 0 has LJ interactions here;
+ * atoms 1 and 2 are electrostatics-only. */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ /* Full-width iterations: run while all eight jjnr entries are real (>=0);
+ * the trailing partial chunk is handled by the masked loop below. */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /* j-force accumulators: summed over all three i atoms, scattered once below */
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ /* V = qq*(1/r + krf*r^2 - crf); F/r = qq*(1/r^3 - 2*krf) */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ /* Potential-shifted LJ:
+ * V = (c12*r^-12 - c12*sh^2)/12 - (c6*r^-6 - c6*sh)/6 with sh = sh_vdw_invrcut6
+ * (presumably rvdw^-6, which would make V zero at the cutoff - see fr->ic).
+ * The shift is constant, so F/r = (c12*r^-12 - c6*r^-6)*r^-2 is unchanged. */
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Water atom 1 - j: electrostatics only */
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Water atom 2 - j: electrostatics only */
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 129 flops */
+ }
+
+ /* Masked epilogue: handle the final (partial) chunk of the j list, where
+ * negative jjnr entries mark padding. */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Dummy entries are clamped to index 0 so loads stay in bounds; their
+ * contributions are cleared via dummy_mask before accumulation. */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Clear force contributions of dummy (padded) entries */
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /* Dummy entries scatter their (already-zeroed) force into scratch */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 129 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*129);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSh_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSh_GeomW3P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* Dump buffer: forces belonging to padded (dummy) j entries are written here so
+ * the scattered stores in the epilogue loop never touch real force memory. */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ /* Main loop: full octets of real j atoms only; it stops as soon as the last slot of
+ * an octet would hold a padded (negative) neighbor index. */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 100 flops */
+ }
+
+ /* Epilogue: final partial octet, padded with negative j indices that are masked
+ * out via dummy_mask and whose forces are redirected to the scratch buffer. */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 100 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*100);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_add_ps(rinv01,_mm256_mul_ps(krf,rsq01)),crf));
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_add_ps(rinv02,_mm256_mul_ps(krf,rsq02)),crf));
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 342 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_add_ps(rinv01,_mm256_mul_ps(krf,rsq01)),crf));
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_add_ps(rinv02,_mm256_mul_ps(krf,rsq02)),crf));
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 342 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*342);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 277 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 277 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*277);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSh_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSh_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_add_ps(rinv30,_mm256_mul_ps(krf,rsq30)),crf));
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 152 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+            /* Entries in jjnr are negative for non-real (padding) atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+             */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_add_ps(rinv30,_mm256_mul_ps(krf,rsq30)),crf));
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 152 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*152);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSh_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSh_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 123 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+            /* Entries in jjnr are negative for non-real (padding) atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+             */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 123 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*123);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSh_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSh_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_add_ps(rinv13,_mm256_mul_ps(krf,rsq13)),crf));
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_add_ps(rinv23,_mm256_mul_ps(krf,rsq23)),crf));
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_add_ps(rinv31,_mm256_mul_ps(krf,rsq31)),crf));
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_add_ps(rinv32,_mm256_mul_ps(krf,rsq32)),crf));
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_add_ps(rinv33,_mm256_mul_ps(krf,rsq33)),crf));
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 368 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+             */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_add_ps(rinv13,_mm256_mul_ps(krf,rsq13)),crf));
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_add_ps(rinv23,_mm256_mul_ps(krf,rsq23)),crf));
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_add_ps(rinv31,_mm256_mul_ps(krf,rsq31)),crf));
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_add_ps(rinv32,_mm256_mul_ps(krf,rsq32)),crf));
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_add_ps(rinv33,_mm256_mul_ps(krf,rsq33)),crf));
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 368 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*368);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSh_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSh_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 303 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 303 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*303);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSw_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ *
+ * NOTE (review): auto-generated kernel - do not hand-edit the arithmetic.
+ * Electrostatics and VdW share a single explicit cutoff (fr->rcoulomb, see
+ * comment below); the VdW potential is smoothed to zero by a switch function
+ * acting between fr->rvdw_switch and the cutoff, while the reaction-field
+ * electrostatics is simply truncated at the cutoff. The j-loop is 8-way
+ * unrolled with 256-bit AVX in single precision.
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSw_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* Dump area: forces for padded (dummy) j-entries are accumulated here and never read back */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rvdw_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop.
+ * Main loop runs in full octets of j-particles; the padded tail of the
+ * list (negative j-indices) is handled by the masked epilogue below. */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Switch polynomial: d = max(r - rswitch, 0), so sw==1 and dsw==0 inside rswitch */
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 70 flops */
+ }
+
+ /* Epilogue: final partial octet of j-particles; padding entries carry
+ * negative indices and their contributions are cleared with dummy_mask. */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ /* Zero r in the padded (dummy_mask) lanes before it enters the switch polynomial */
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Switch polynomial: d = max(r - rswitch, 0), so sw==1 and dsw==0 inside rswitch */
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ /* Dummy entries write their (zeroed) forces into scratch instead of the real force array */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 71 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 9 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*71);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSw_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSw_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rvdw_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 61 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 62 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*62);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSw_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSw_GeomW3P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* NOTE(review): this file is produced by the avx_256_single kernel generator
+ * (see the file header above); improve the generator template rather than
+ * hand-editing this kernel.
+ */
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rvdw_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ /* Coefficients of the 5th-order switching polynomial on [rswitch,rcutoff];
+ * swV* give the potential factor, swF* its derivative (force correction).
+ */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ /* Main loop handles full blocks of 8 j-particles; the loop condition
+ * stops at the first padded (negative) jjnr entry, which the masked
+ * epilogue below then processes.
+ */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 145 flops */
+ }
+
+ /* Epilogue: final partial block of up to 8 j-particles; padded entries
+ * (negative jjnr values) are zeroed through dummy_mask and their forces
+ * are redirected into the local scratch buffer.
+ */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ /* Clear r for dummy entries so the switch polynomial stays finite */
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /* Dummy lanes scatter into scratch so real forces are untouched */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 146 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*146);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSw_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSw_GeomW3P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rvdw_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 124 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 125 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*125);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSw_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSw_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+     * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+     */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rvdw_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_add_ps(rinv01,_mm256_mul_ps(krf,rsq01)),crf));
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_add_ps(rinv02,_mm256_mul_ps(krf,rsq02)),crf));
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 358 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_add_ps(rinv01,_mm256_mul_ps(krf,rsq01)),crf));
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_add_ps(rinv02,_mm256_mul_ps(krf,rsq02)),crf));
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 359 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*359);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSw_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSw_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rvdw_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 301 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 302 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*302);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSw_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSw_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rvdw_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_add_ps(rinv30,_mm256_mul_ps(krf,rsq30)),crf));
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 170 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_add_ps(rinv30,_mm256_mul_ps(krf,rsq30)),crf));
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 171 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*171);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSw_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSw_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rvdw_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 149 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 150 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*150);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSw_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSw_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rvdw_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_add_ps(rinv13,_mm256_mul_ps(krf,rsq13)),crf));
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_add_ps(rinv23,_mm256_mul_ps(krf,rsq23)),crf));
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_add_ps(rinv31,_mm256_mul_ps(krf,rsq31)),crf));
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_add_ps(rinv32,_mm256_mul_ps(krf,rsq32)),crf));
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_add_ps(rinv33,_mm256_mul_ps(krf,rsq33)),crf));
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 386 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_add_ps(rinv13,_mm256_mul_ps(krf,rsq13)),crf));
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_add_ps(rinv23,_mm256_mul_ps(krf,rsq23)),crf));
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_add_ps(rinv31,_mm256_mul_ps(krf,rsq31)),crf));
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_add_ps(rinv32,_mm256_mul_ps(krf,rsq32)),crf));
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_add_ps(rinv33,_mm256_mul_ps(krf,rsq33)),crf));
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 387 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*387);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSw_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRFCut_VdwLJSw_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ rswitch_scalar = fr->rvdw_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 329 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ d = _mm256_sub_ps(r00,rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+
+ /* Evaluate switch function */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = fvdw;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 330 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*330);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwNone_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRFCut_VdwNone_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 36 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 36 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 8 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VF,outeriter*8 + inneriter*36);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwNone_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ *
+ * Force-only variant: no potential-energy accumulation is done here;
+ * the VF kernel above computes both. Eight j atoms are processed per
+ * iteration in one 256-bit AVX register.
+ */
+void
+nb_kernel_ElecRFCut_VdwNone_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* scratch receives force output for padded (dummy) j entries in the
+ * masked epilogue, so the real force array is never written for them. */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+
+ /* Start inner kernel loop: full octets of real j atoms. A negative
+ * jjnr entry marks list padding; jjnr[jidx+7]>=0 guarantees the whole
+ * octet is real here, so no masking is needed in this loop. */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ /* Zero the force for lanes beyond the cutoff */
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 30 flops */
+ }
+
+ /* Masked epilogue: handle a final, partially filled octet of j atoms */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Dummy entries are redirected to index 0 so the loads stay in bounds */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Clear forces in the dummy (padding) lanes */
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ /* Dummy lanes write to scratch instead of the real force array */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ }
+
+ /* Inner loop uses 30 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_F,outeriter*7 + inneriter*30);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwNone_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ *
+ * Three i sites (a 3-point water) interact with eight j particles per
+ * AVX iteration; both the electrostatic potential and forces are
+ * accumulated.
+ */
+void
+nb_kernel_ElecRFCut_VdwNone_GeomW3P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* scratch receives force output for padded (dummy) j entries in the
+ * masked epilogue, so the real force array is never written for them. */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* Setup water-specific parameters: charges of the three water sites
+ * are constant over the whole list, so load them once. */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop: full octets of real j atoms. A negative
+ * jjnr entry marks list padding; jjnr[jidx+7]>=0 guarantees the whole
+ * octet is real here, so no masking is needed in this loop. */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles (shared by all three i sites) */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /* Accumulate j forces over the three i-site interactions, scatter once below */
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 111 flops */
+ }
+
+ /* Masked epilogue: handle a final, partially filled octet of j atoms */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Dummy entries are redirected to index 0 so the loads stay in bounds */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Clear forces in the dummy (padding) lanes */
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Clear forces in the dummy (padding) lanes */
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Clear forces in the dummy (padding) lanes */
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /* Dummy lanes write to scratch instead of the real force array */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 111 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_VF,outeriter*19 + inneriter*111);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwNone_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRFCut_VdwNone_GeomW3P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 93 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 93 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_F,outeriter*18 + inneriter*93);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwNone_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRFCut_VdwNone_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_add_ps(rinv01,_mm256_mul_ps(krf,rsq01)),crf));
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_add_ps(rinv02,_mm256_mul_ps(krf,rsq02)),crf));
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 324 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+             */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_add_ps(rinv01,_mm256_mul_ps(krf,rsq01)),crf));
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_add_ps(rinv02,_mm256_mul_ps(krf,rsq02)),crf));
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 324 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_VF,outeriter*19 + inneriter*324);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwNone_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRFCut_VdwNone_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+    /* Suffixes 0,1,2 refer to particle indices for waters in the inner or outer loop, or
+     * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, i.e. the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+     */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 270 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq00,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq01,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq02,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 270 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_F,outeriter*18 + inneriter*270);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwNone_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRFCut_VdwNone_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_add_ps(rinv30,_mm256_mul_ps(krf,rsq30)),crf));
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 111 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+         * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_add_ps(rinv30,_mm256_mul_ps(krf,rsq30)),crf));
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 111 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_VF,outeriter*19 + inneriter*111);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwNone_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ */
+/*
+ * Force-only kernel: accumulates reaction-field Coulomb forces (no potential
+ * energy sums, no energy-group bookkeeping) between the three charged sites
+ * of a 4-site water i particle and generic single-site j particles,
+ * processed 8-wide in AVX-256 single precision. An explicit cutoff is
+ * enforced per lane through cutoff_mask.
+ */
+void
+nb_kernel_ElecRFCut_VdwNone_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* scratch: discard target for force stores of padded (dummy) j entries in
+ * the masked epilogue, so all eight SIMD lanes can always be written. */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ /* NOTE(review): the generator declares some temporaries (signbit, one, two,
+ * velec, velecsum, crf, r10/r20/r30, c6_*/c12_*) that this force-only,
+ * VdW-free variant never uses; they are kept to match the generated form. */
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords (atoms 1-3; the +DIM offset skips site 0,
+ * which this kernel never interacts) and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop.
+ * Main loop: full 8-wide chunks only; it stops at the first chunk that
+ * contains a padding entry (negative jnr), which the masked epilogue
+ * below then handles. */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ /* Scalar force: qq*(1/r^3 - 2*k_rf); lanes beyond the cutoff are zeroed below */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 93 flops */
+ }
+
+ /* Masked epilogue: handle the final, partially filled 8-wide chunk.
+ * Padding entries have negative jnr; their lanes are cleared with
+ * dummy_mask and their force stores are redirected to scratch. */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Clamp dummy indices to 0 so the gather loads stay in-bounds */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq10,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Clear forces in the lanes that correspond to padding entries */
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq20,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq30,rcutoff2))
+ {
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ }
+
+ /* Route force stores for padding lanes into scratch instead of f */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 93 flops */
+ }
+
+ /* End of innermost loop */
+
+ /* Reduce and store i forces for atoms 1-3 plus the shift force;
+ * f+i_coord_offset+DIM again skips site 0 of the 4-site water. */
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_F,outeriter*18 + inneriter*93);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwNone_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRFCut_VdwNone_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_add_ps(rinv13,_mm256_mul_ps(krf,rsq13)),crf));
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_add_ps(rinv23,_mm256_mul_ps(krf,rsq23)),crf));
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_add_ps(rinv31,_mm256_mul_ps(krf,rsq31)),crf));
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_add_ps(rinv32,_mm256_mul_ps(krf,rsq32)),crf));
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_add_ps(rinv33,_mm256_mul_ps(krf,rsq33)),crf));
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 324 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_add_ps(rinv13,_mm256_mul_ps(krf,rsq13)),crf));
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_add_ps(rinv23,_mm256_mul_ps(krf,rsq23)),crf));
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_add_ps(rinv31,_mm256_mul_ps(krf,rsq31)),crf));
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_add_ps(rinv32,_mm256_mul_ps(krf,rsq32)),crf));
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_add_ps(rinv33,_mm256_mul_ps(krf,rsq33)),crf));
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 324 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*324);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwNone_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRFCut_VdwNone_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 270 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq11,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq12,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq13,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq21,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq22,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq23,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq31,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq32,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ }
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ if (gmx_mm256_any_lt(rsq33,rcutoff2))
+ {
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
+
+ fscal = felec;
+
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ }
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 270 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*270);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwCSTab_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRF_VdwCSTab_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 67 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 68 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 9 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*68);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwCSTab_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRF_VdwCSTab_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 54 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 55 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*55);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwCSTab_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRF_VdwCSTab_GeomW3P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 134 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 135 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*135);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwCSTab_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRF_VdwCSTab_GeomW3P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 111 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 112 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*112);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwCSTab_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRF_VdwCSTab_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_add_ps(rinv01,_mm256_mul_ps(krf,rsq01)),crf));
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_add_ps(rinv02,_mm256_mul_ps(krf,rsq02)),crf));
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 323 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_add_ps(rinv01,_mm256_mul_ps(krf,rsq01)),crf));
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_add_ps(rinv02,_mm256_mul_ps(krf,rsq02)),crf));
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 324 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*324);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwCSTab_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRF_VdwCSTab_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 270 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 271 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*271);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwCSTab_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRF_VdwCSTab_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_add_ps(rinv30,_mm256_mul_ps(krf,rsq30)),crf));
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 155 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_add_ps(rinv30,_mm256_mul_ps(krf,rsq30)),crf));
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 156 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*156);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwCSTab_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ *
+ * Force-only variant: no potential energies (velecsum/vvdwsum) are
+ * accumulated in this kernel, only forces and shift forces.
+ */
+void
+nb_kernel_ElecRF_VdwCSTab_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* Scratch buffer receives force decrements for padded (dummy) j entries */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ /* AVX1 has no 256-bit integer ops, so table indices are handled as two 128-bit halves */
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop; full batches of 8 j atoms (no masking needed) */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ /* Shift left by 3 = multiply by 8: each table point stores 4 dispersion + 4 repulsion coefficients */
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 132 flops */
+ }
+
+ /* Epilogue: handle the last, possibly partial, batch of j atoms using a dummy mask */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ /* Zero r for dummy (padding) entries so the table lookup index stays valid */
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ /* Shift left by 3 = multiply by 8: each table point stores 4 dispersion + 4 repulsion coefficients */
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /* Dummy entries write their force decrement into the scratch buffer, not the real force array */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 133 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*133);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwCSTab_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRF_VdwCSTab_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_add_ps(rinv13,_mm256_mul_ps(krf,rsq13)),crf));
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_add_ps(rinv23,_mm256_mul_ps(krf,rsq23)),crf));
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_add_ps(rinv31,_mm256_mul_ps(krf,rsq31)),crf));
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_add_ps(rinv32,_mm256_mul_ps(krf,rsq32)),crf));
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_add_ps(rinv33,_mm256_mul_ps(krf,rsq33)),crf));
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 347 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_00,VV);
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_add_ps(rinv13,_mm256_mul_ps(krf,rsq13)),crf));
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_add_ps(rinv23,_mm256_mul_ps(krf,rsq23)),crf));
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_add_ps(rinv31,_mm256_mul_ps(krf,rsq31)),crf));
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_add_ps(rinv32,_mm256_mul_ps(krf,rsq32)),crf));
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_add_ps(rinv33,_mm256_mul_ps(krf,rsq33)),crf));
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 348 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*348);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwCSTab_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: CubicSplineTable
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRF_VdwCSTab_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 294 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ r00 = _mm256_mul_ps(rsq00,rinv00);
+ r00 = _mm256_andnot_ps(dummy_mask,r00);
+
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r00,vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_00,FF);
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_00,FF);
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 295 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*295);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwLJ_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRF_VdwLJ_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 44 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 44 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 9 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*44);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwLJ_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRF_VdwLJ_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 34 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 34 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*34);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwLJ_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRF_VdwLJ_GeomW3P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* Dummy force-output target: padded (non-real) j entries write here instead of into f[] */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ /* Reaction-field constants: k_rf, 2*k_rf (for the force) and c_rf (potential shift) */
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ /* Only atom 0 (oxygen) of the water has LJ parameters in this geometry */
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop.
+ * Assumes the j list is padded with negative jjnr entries, so jjnr[jidx+7]<0
+ * flags an incomplete final batch, which is handled by the masked epilogue below.
+ */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS:
+ * V = qq*(1/r + k_rf*r^2 - c_rf), F/r = qq*(1/r^3 - 2*k_rf)
+ */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION.
+ * NOTE(review): c6/c12 appear pre-scaled by 6/12 in nbfp (force uses them
+ * directly; the potential multiplies by 1/6 and 1/12) - confirm against setup.
+ */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ /* Newton's third law: subtract the accumulated forces from the j atoms */
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 111 flops */
+ }
+
+ /* Masked epilogue: final partial batch of fewer than 8 real j particles */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Clamp dummy indices to 0 so the gather loads below stay in bounds */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom.
+ * Dummy lanes are zeroed with andnot before accumulation so padding never
+ * contributes to energies or forces.
+ */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /* Dummy entries dump their (already-zeroed) forces into the scratch buffer */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 111 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*111);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwLJ_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRF_VdwLJ_GeomW3P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 91 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 91 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*91);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwLJ_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRF_VdwLJ_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_add_ps(rinv01,_mm256_mul_ps(krf,rsq01)),crf));
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_add_ps(rinv02,_mm256_mul_ps(krf,rsq02)),crf));
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 300 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_add_ps(rinv01,_mm256_mul_ps(krf,rsq01)),crf));
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_add_ps(rinv02,_mm256_mul_ps(krf,rsq02)),crf));
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 300 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 20 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*300);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwLJ_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ *
+ * Force-only variant: computes forces for all 3x3 water-water site pairs
+ * but, unlike the corresponding _VF_ kernel, does not accumulate potential
+ * energies (no velecsum/vvdwsum updates and no energygrp writes).
+ * The j loop is unrolled 8-way with AVX: each __m256 register holds one
+ * quantity for eight different j waters simultaneously.
+ */
+void
+nb_kernel_ElecRF_VdwLJ_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* Dump area for forces belonging to padded/dummy j entries in the
+ * epilogue loop, so the swizzled force stores can be unconditional. */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ /* Reaction-field constants: krf2 = 2*k_rf is used in the force expression */
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters: charges and LJ parameters are
+ * identical for all waters, so they are loaded once from the first
+ * i water (iinr[0]) and broadcast. Only the site-0/site-0 pair gets
+ * LJ parameters (c6_00/c12_00); the other eight pairs are
+ * electrostatics-only in this kernel. */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ /* Clear the scratch buffer so force stores for dummy atoms are harmless */
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop.
+ * This main loop only handles batches of eight real j atoms; the guard
+ * jjnr[jidx+7]>=0 detects padding (negative indices), which presumably
+ * only occurs at the end of the list - the padded batch is handled by
+ * the masked epilogue below. */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS
+ * Force-only: felec = qq*(rinv*rinvsq - 2*k_rf); no potential here. */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION
+ * Only this 0-0 site pair has LJ parameters (see setup above). */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 250 flops */
+ }
+
+ /* Epilogue: at most one remaining batch that contains padded
+ * (negative) j indices; identical math, but masked with dummy_mask
+ * and with dummy forces redirected to the scratch buffer. */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Clamp dummy indices to 0 so the coordinate loads stay in bounds */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = _mm256_add_ps(felec,fvdw);
+
+ /* Zero the force for dummy (padded) j entries */
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /* Route forces of dummy entries to the scratch buffer */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 250 flops */
+ }
+
+ /* End of innermost loop */
+
+ /* Reduce the accumulated i forces and add them (plus shift forces)
+ * to the global arrays. No energy update in this force-only kernel. */
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*250);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwLJ_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRF_VdwLJ_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ /* NOTE(review): generated code (avx_256_single kernel generator) - fix the generator,
+ * not this file, if a defect is found here.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ /* NOTE(review): charges and the LJ type row are read once from the first i particle
+ * and reused for every outer-loop water - assumes all i particles handled by this
+ * Water4 kernel are identical 4-site waters (verify against neighborlist setup).
+ */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ /* Main loop handles 8 j particles per iteration and stops at the first padding
+ * entry (jjnr[jidx+7]<0); the masked epilogue below handles the remainder.
+ * Assumes jjnr is padded to a multiple of 8 with negative indices - TODO confirm
+ * this contract with the neighborlist builder.
+ */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ /* Pair 0-0 is LJ-only (no electrostatics below), so only 1/r^2 is needed
+ * and the invsqrt is skipped for it.
+ */
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_add_ps(rinv30,_mm256_mul_ps(krf,rsq30)),crf));
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 131 flops */
+ }
+
+ /* Masked epilogue: handles the final partial batch of fewer than 8 j particles. */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ /* Dummy lanes are clamped to index 0 so the loads below stay in bounds;
+ * their contributions are zeroed with dummy_mask before accumulation.
+ */
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_add_ps(rinv30,_mm256_mul_ps(krf,rsq30)),crf));
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /* Redirect force output of dummy lanes to the scratch buffer so no real
+ * atom's force memory is touched by padding entries.
+ */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 131 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*131);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwLJ_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRF_VdwLJ_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 111 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+ vdwjidx0A = 2*vdwtype[jnrA+0];
+ vdwjidx0B = 2*vdwtype[jnrB+0];
+ vdwjidx0C = 2*vdwtype[jnrC+0];
+ vdwjidx0D = 2*vdwtype[jnrD+0];
+ vdwjidx0E = 2*vdwtype[jnrE+0];
+ vdwjidx0F = 2*vdwtype[jnrF+0];
+ vdwjidx0G = 2*vdwtype[jnrG+0];
+ vdwjidx0H = 2*vdwtype[jnrH+0];
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
+ vdwioffsetptr0+vdwjidx0B,
+ vdwioffsetptr0+vdwjidx0C,
+ vdwioffsetptr0+vdwjidx0D,
+ vdwioffsetptr0+vdwjidx0E,
+ vdwioffsetptr0+vdwjidx0F,
+ vdwioffsetptr0+vdwjidx0G,
+ vdwioffsetptr0+vdwjidx0H,
+ &c6_00,&c12_00);
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 111 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*111);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwLJ_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRF_VdwLJ_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+ vvdwsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_add_ps(rinv13,_mm256_mul_ps(krf,rsq13)),crf));
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_add_ps(rinv23,_mm256_mul_ps(krf,rsq23)),crf));
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_add_ps(rinv31,_mm256_mul_ps(krf,rsq31)),crf));
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_add_ps(rinv32,_mm256_mul_ps(krf,rsq32)),crf));
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_add_ps(rinv33,_mm256_mul_ps(krf,rsq33)),crf));
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 323 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_add_ps(rinv13,_mm256_mul_ps(krf,rsq13)),crf));
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_add_ps(rinv23,_mm256_mul_ps(krf,rsq23)),crf));
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_add_ps(rinv31,_mm256_mul_ps(krf,rsq31)),crf));
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_add_ps(rinv32,_mm256_mul_ps(krf,rsq32)),crf));
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_add_ps(rinv33,_mm256_mul_ps(krf,rsq33)),crf));
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 323 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 26 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*323);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwLJ_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: LennardJones
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRF_VdwLJ_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+ vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ vdwjidx0A = 2*vdwtype[inr+0];
+ c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
+ c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = fvdw;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 273 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq00 = gmx_mm256_inv_ps(rsq00);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
+
+ fscal = fvdw;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 273 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 24 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*273);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwNone_GeomP1P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Particle-Particle
+ * Calculate force/pot: PotentialAndForce
+ *
+ * Reaction-field pair interaction as evaluated below:
+ * velec = qq*(rinv + krf*rsq - crf)
+ * fscal = qq*(rinv*rinvsq - 2*krf)
+ * where fscal is multiplied by the displacement vector components to give
+ * the Cartesian force contributions.
+ */
+void
+nb_kernel_ElecRF_VdwNone_GeomP1P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* scratch absorbs j-atom force writes for padded (dummy) neighbor entries */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop.
+ * This main loop requires a full octet of real j atoms (all jjnr>=0);
+ * the final, possibly padded octet is handled in the epilogue below.
+ */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 32 flops */
+ }
+
+ /* Epilogue: final octet, padded with negative jjnr entries for dummy atoms */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ /* Dummy entries write their (already-zeroed) forces to scratch instead of f */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 32 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 8 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VF,outeriter*8 + inneriter*32);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwNone_GeomP1P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Particle-Particle
+ * Calculate force/pot: Force
+ *
+ * Force-only variant: only the reaction-field scalar force
+ * fscal = qq*(rinv*rinvsq - 2*krf)
+ * is evaluated; no potential energy is accumulated.
+ */
+void
+nb_kernel_ElecRF_VdwNone_GeomP1P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ /* scratch absorbs j-atom force writes for padded (dummy) neighbor entries */
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ /* velec/velecsum are declared by the generator but unused in this force-only kernel */
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+
+ /* Load parameters for i particles */
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+
+ /* Start inner kernel loop.
+ * This main loop requires a full octet of real j atoms (all jjnr>=0);
+ * the final, possibly padded octet is handled in the epilogue below.
+ */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 27 flops */
+ }
+
+ /* Epilogue: final octet, padded with negative jjnr entries for dummy atoms */
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ /* Dummy entries write their (already-zeroed) forces to scratch instead of f */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+
+ /* Inner loop uses 27 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 7 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_F,outeriter*7 + inneriter*27);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwNone_GeomW3P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Water3-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRF_VdwNone_GeomW3P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 99 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 99 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_VF,outeriter*19 + inneriter*99);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwNone_GeomW3P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Water3-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRF_VdwNone_GeomW3P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 84 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq00 = _mm256_mul_ps(iq0,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 84 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3_F,outeriter*18 + inneriter*84);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwNone_GeomW3W3_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Water3-Water3
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRF_VdwNone_GeomW3W3_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the four positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_add_ps(rinv01,_mm256_mul_ps(krf,rsq01)),crf));
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_add_ps(rinv02,_mm256_mul_ps(krf,rsq02)),crf));
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 288 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_add_ps(rinv01,_mm256_mul_ps(krf,rsq01)),crf));
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_add_ps(rinv02,_mm256_mul_ps(krf,rsq02)),crf));
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 288 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_VF,outeriter*19 + inneriter*288);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwNone_GeomW3W3_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Water3-Water3
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRF_VdwNone_GeomW3W3_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr0;
+ __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
+ __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
+ __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+
+ jq0 = _mm256_set1_ps(charge[inr+0]);
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ qq00 = _mm256_mul_ps(iq0,jq0);
+ qq01 = _mm256_mul_ps(iq0,jq1);
+ qq02 = _mm256_mul_ps(iq0,jq2);
+ qq10 = _mm256_mul_ps(iq1,jq0);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq20 = _mm256_mul_ps(iq2,jq0);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+
+ fix0 = _mm256_setzero_ps();
+ fiy0 = _mm256_setzero_ps();
+ fiz0 = _mm256_setzero_ps();
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 243 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+             */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+
+ /* Calculate displacement vector */
+ dx00 = _mm256_sub_ps(ix0,jx0);
+ dy00 = _mm256_sub_ps(iy0,jy0);
+ dz00 = _mm256_sub_ps(iz0,jz0);
+ dx01 = _mm256_sub_ps(ix0,jx1);
+ dy01 = _mm256_sub_ps(iy0,jy1);
+ dz01 = _mm256_sub_ps(iz0,jz1);
+ dx02 = _mm256_sub_ps(ix0,jx2);
+ dy02 = _mm256_sub_ps(iy0,jy2);
+ dz02 = _mm256_sub_ps(iz0,jz2);
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+
+ /* Calculate squared distance and things based on it */
+ rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
+ rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
+ rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+
+ rinv00 = gmx_mm256_invsqrt_ps(rsq00);
+ rinv01 = gmx_mm256_invsqrt_ps(rsq01);
+ rinv02 = gmx_mm256_invsqrt_ps(rsq02);
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+
+ rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
+ rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
+ rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx00);
+ ty = _mm256_mul_ps(fscal,dy00);
+ tz = _mm256_mul_ps(fscal,dz00);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx01);
+ ty = _mm256_mul_ps(fscal,dy01);
+ tz = _mm256_mul_ps(fscal,dz01);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx02);
+ ty = _mm256_mul_ps(fscal,dy02);
+ tz = _mm256_mul_ps(fscal,dz02);
+
+ /* Update vectorial force */
+ fix0 = _mm256_add_ps(fix0,tx);
+ fiy0 = _mm256_add_ps(fiy0,ty);
+ fiz0 = _mm256_add_ps(fiz0,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+
+ /* Inner loop uses 243 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_F,outeriter*18 + inneriter*243);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwNone_GeomW4P1_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Water4-Particle
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRF_VdwNone_GeomW4P1_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
+     * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+     */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_add_ps(rinv30,_mm256_mul_ps(krf,rsq30)),crf));
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 99 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+            /* Sign of each element will be negative for non-real atoms.
+             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+             */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_add_ps(rinv30,_mm256_mul_ps(krf,rsq30)),crf));
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 99 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_VF,outeriter*19 + inneriter*99);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwNone_GeomW4P1_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Water4-Particle
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRF_VdwNone_GeomW4P1_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
+ __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
+ __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
+ __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
+ __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 84 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+                 * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+
+ /* Calculate displacement vector */
+ dx10 = _mm256_sub_ps(ix1,jx0);
+ dy10 = _mm256_sub_ps(iy1,jy0);
+ dz10 = _mm256_sub_ps(iz1,jz0);
+ dx20 = _mm256_sub_ps(ix2,jx0);
+ dy20 = _mm256_sub_ps(iy2,jy0);
+ dz20 = _mm256_sub_ps(iz2,jz0);
+ dx30 = _mm256_sub_ps(ix3,jx0);
+ dy30 = _mm256_sub_ps(iy3,jy0);
+ dz30 = _mm256_sub_ps(iz3,jz0);
+
+ /* Calculate squared distance and things based on it */
+ rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
+ rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
+ rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
+
+ rinv10 = gmx_mm256_invsqrt_ps(rsq10);
+ rinv20 = gmx_mm256_invsqrt_ps(rsq20);
+ rinv30 = gmx_mm256_invsqrt_ps(rsq30);
+
+ rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
+ rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
+ rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
+
+ /* Load parameters for j particles */
+ jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
+ charge+jnrC+0,charge+jnrD+0,
+ charge+jnrE+0,charge+jnrF+0,
+ charge+jnrG+0,charge+jnrH+0);
+
+ fjx0 = _mm256_setzero_ps();
+ fjy0 = _mm256_setzero_ps();
+ fjz0 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq10 = _mm256_mul_ps(iq1,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx10);
+ ty = _mm256_mul_ps(fscal,dy10);
+ tz = _mm256_mul_ps(fscal,dz10);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq20 = _mm256_mul_ps(iq2,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx20);
+ ty = _mm256_mul_ps(fscal,dy20);
+ tz = _mm256_mul_ps(fscal,dz20);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* Compute parameters for interactions between i and j atoms */
+ qq30 = _mm256_mul_ps(iq3,jq0);
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq30,_mm256_sub_ps(_mm256_mul_ps(rinv30,rinvsq30),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx30);
+ ty = _mm256_mul_ps(fscal,dy30);
+ tz = _mm256_mul_ps(fscal,dz30);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx0 = _mm256_add_ps(fjx0,tx);
+ fjy0 = _mm256_add_ps(fjy0,ty);
+ fjz0 = _mm256_add_ps(fjz0,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+
+ /* Inner loop uses 84 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_F,outeriter*18 + inneriter*84);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwNone_GeomW4W4_VF_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Water4-Water4
+ * Calculate force/pot: PotentialAndForce
+ */
+void
+nb_kernel_ElecRF_VdwNone_GeomW4W4_VF_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Reset potential sums */
+ velecsum = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_add_ps(rinv13,_mm256_mul_ps(krf,rsq13)),crf));
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_add_ps(rinv23,_mm256_mul_ps(krf,rsq23)),crf));
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_add_ps(rinv31,_mm256_mul_ps(krf,rsq31)),crf));
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_add_ps(rinv32,_mm256_mul_ps(krf,rsq32)),crf));
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_add_ps(rinv33,_mm256_mul_ps(krf,rsq33)),crf));
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 288 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_add_ps(rinv13,_mm256_mul_ps(krf,rsq13)),crf));
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_add_ps(rinv23,_mm256_mul_ps(krf,rsq23)),crf));
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_add_ps(rinv31,_mm256_mul_ps(krf,rsq31)),crf));
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_add_ps(rinv32,_mm256_mul_ps(krf,rsq32)),crf));
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_add_ps(rinv33,_mm256_mul_ps(krf,rsq33)),crf));
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ velecsum = _mm256_add_ps(velecsum,velec);
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 288 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ ggid = gid[iidx];
+ /* Update potential energies */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 19 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*288);
+}
+/*
+ * Gromacs nonbonded kernel: nb_kernel_ElecRF_VdwNone_GeomW4W4_F_avx_256_single
+ * Electrostatics interaction: ReactionField
+ * VdW interaction: None
+ * Geometry: Water4-Water4
+ * Calculate force/pot: Force
+ */
+void
+nb_kernel_ElecRF_VdwNone_GeomW4W4_F_avx_256_single
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+     * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ real * vdwioffsetptr1;
+ __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
+ real * vdwioffsetptr2;
+ __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
+ real * vdwioffsetptr3;
+ __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
+ int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
+ __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
+ int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
+ __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
+ int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
+ __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
+ __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
+ __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
+ __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
+ __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
+ __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
+ __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
+ __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
+ __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
+ __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
+ iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
+ iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
+
+ jq1 = _mm256_set1_ps(charge[inr+1]);
+ jq2 = _mm256_set1_ps(charge[inr+2]);
+ jq3 = _mm256_set1_ps(charge[inr+3]);
+ qq11 = _mm256_mul_ps(iq1,jq1);
+ qq12 = _mm256_mul_ps(iq1,jq2);
+ qq13 = _mm256_mul_ps(iq1,jq3);
+ qq21 = _mm256_mul_ps(iq2,jq1);
+ qq22 = _mm256_mul_ps(iq2,jq2);
+ qq23 = _mm256_mul_ps(iq2,jq3);
+ qq31 = _mm256_mul_ps(iq3,jq1);
+ qq32 = _mm256_mul_ps(iq3,jq2);
+ qq33 = _mm256_mul_ps(iq3,jq3);
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+
+ fix1 = _mm256_setzero_ps();
+ fiy1 = _mm256_setzero_ps();
+ fiz1 = _mm256_setzero_ps();
+ fix2 = _mm256_setzero_ps();
+ fiy2 = _mm256_setzero_ps();
+ fiz2 = _mm256_setzero_ps();
+ fix3 = _mm256_setzero_ps();
+ fiy3 = _mm256_setzero_ps();
+ fiz3 = _mm256_setzero_ps();
+
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ fscal = felec;
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 243 flops */
+ }
+
+ if(jidx<j_index_end)
+ {
+
+ /* Get j neighbor index, and coordinate index */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+
+ /* Calculate displacement vector */
+ dx11 = _mm256_sub_ps(ix1,jx1);
+ dy11 = _mm256_sub_ps(iy1,jy1);
+ dz11 = _mm256_sub_ps(iz1,jz1);
+ dx12 = _mm256_sub_ps(ix1,jx2);
+ dy12 = _mm256_sub_ps(iy1,jy2);
+ dz12 = _mm256_sub_ps(iz1,jz2);
+ dx13 = _mm256_sub_ps(ix1,jx3);
+ dy13 = _mm256_sub_ps(iy1,jy3);
+ dz13 = _mm256_sub_ps(iz1,jz3);
+ dx21 = _mm256_sub_ps(ix2,jx1);
+ dy21 = _mm256_sub_ps(iy2,jy1);
+ dz21 = _mm256_sub_ps(iz2,jz1);
+ dx22 = _mm256_sub_ps(ix2,jx2);
+ dy22 = _mm256_sub_ps(iy2,jy2);
+ dz22 = _mm256_sub_ps(iz2,jz2);
+ dx23 = _mm256_sub_ps(ix2,jx3);
+ dy23 = _mm256_sub_ps(iy2,jy3);
+ dz23 = _mm256_sub_ps(iz2,jz3);
+ dx31 = _mm256_sub_ps(ix3,jx1);
+ dy31 = _mm256_sub_ps(iy3,jy1);
+ dz31 = _mm256_sub_ps(iz3,jz1);
+ dx32 = _mm256_sub_ps(ix3,jx2);
+ dy32 = _mm256_sub_ps(iy3,jy2);
+ dz32 = _mm256_sub_ps(iz3,jz2);
+ dx33 = _mm256_sub_ps(ix3,jx3);
+ dy33 = _mm256_sub_ps(iy3,jy3);
+ dz33 = _mm256_sub_ps(iz3,jz3);
+
+ /* Calculate squared distance and things based on it */
+ rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
+ rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
+ rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
+ rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
+ rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
+ rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
+ rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
+ rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
+ rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
+
+ rinv11 = gmx_mm256_invsqrt_ps(rsq11);
+ rinv12 = gmx_mm256_invsqrt_ps(rsq12);
+ rinv13 = gmx_mm256_invsqrt_ps(rsq13);
+ rinv21 = gmx_mm256_invsqrt_ps(rsq21);
+ rinv22 = gmx_mm256_invsqrt_ps(rsq22);
+ rinv23 = gmx_mm256_invsqrt_ps(rsq23);
+ rinv31 = gmx_mm256_invsqrt_ps(rsq31);
+ rinv32 = gmx_mm256_invsqrt_ps(rsq32);
+ rinv33 = gmx_mm256_invsqrt_ps(rsq33);
+
+ rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
+ rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
+ rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
+ rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
+ rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
+ rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
+ rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
+ rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
+ rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
+
+ fjx1 = _mm256_setzero_ps();
+ fjy1 = _mm256_setzero_ps();
+ fjz1 = _mm256_setzero_ps();
+ fjx2 = _mm256_setzero_ps();
+ fjy2 = _mm256_setzero_ps();
+ fjz2 = _mm256_setzero_ps();
+ fjx3 = _mm256_setzero_ps();
+ fjy3 = _mm256_setzero_ps();
+ fjz3 = _mm256_setzero_ps();
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx11);
+ ty = _mm256_mul_ps(fscal,dy11);
+ tz = _mm256_mul_ps(fscal,dz11);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx12);
+ ty = _mm256_mul_ps(fscal,dy12);
+ tz = _mm256_mul_ps(fscal,dz12);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx13);
+ ty = _mm256_mul_ps(fscal,dy13);
+ tz = _mm256_mul_ps(fscal,dz13);
+
+ /* Update vectorial force */
+ fix1 = _mm256_add_ps(fix1,tx);
+ fiy1 = _mm256_add_ps(fiy1,ty);
+ fiz1 = _mm256_add_ps(fiz1,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx21);
+ ty = _mm256_mul_ps(fscal,dy21);
+ tz = _mm256_mul_ps(fscal,dz21);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx22);
+ ty = _mm256_mul_ps(fscal,dy22);
+ tz = _mm256_mul_ps(fscal,dz22);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx23);
+ ty = _mm256_mul_ps(fscal,dy23);
+ tz = _mm256_mul_ps(fscal,dz23);
+
+ /* Update vectorial force */
+ fix2 = _mm256_add_ps(fix2,tx);
+ fiy2 = _mm256_add_ps(fiy2,ty);
+ fiz2 = _mm256_add_ps(fiz2,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx31);
+ ty = _mm256_mul_ps(fscal,dy31);
+ tz = _mm256_mul_ps(fscal,dz31);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx1 = _mm256_add_ps(fjx1,tx);
+ fjy1 = _mm256_add_ps(fjy1,ty);
+ fjz1 = _mm256_add_ps(fjz1,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx32);
+ ty = _mm256_mul_ps(fscal,dy32);
+ tz = _mm256_mul_ps(fscal,dz32);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx2 = _mm256_add_ps(fjx2,tx);
+ fjy2 = _mm256_add_ps(fjy2,ty);
+ fjz2 = _mm256_add_ps(fjz2,tz);
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ felec = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
+
+ fscal = felec;
+
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx33);
+ ty = _mm256_mul_ps(fscal,dy33);
+ tz = _mm256_mul_ps(fscal,dz33);
+
+ /* Update vectorial force */
+ fix3 = _mm256_add_ps(fix3,tx);
+ fiy3 = _mm256_add_ps(fiy3,ty);
+ fiz3 = _mm256_add_ps(fiz3,tz);
+
+ fjx3 = _mm256_add_ps(fjx3,tx);
+ fjy3 = _mm256_add_ps(fjy3,ty);
+ fjz3 = _mm256_add_ps(fjz3,tz);
+
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+
+ /* Inner loop uses 243 flops */
+ }
+
+ /* End of innermost loop */
+
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses 18 flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*243);
+}
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs avx_256_single kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifndef nb_kernel_avx_256_single_h
+#define nb_kernel_avx_256_single_h
+
+#include "../nb_kernel.h"
+
+nb_kernel_t nb_kernel_ElecNone_VdwLJ_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecNone_VdwLJ_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecNone_VdwLJSh_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecNone_VdwLJSh_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecNone_VdwLJSw_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecNone_VdwLJSw_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecNone_VdwCSTab_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecNone_VdwCSTab_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwLJ_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwLJ_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwLJ_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwLJ_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwLJ_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwLJ_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwLJ_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwLJ_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwLJ_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwLJ_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwNone_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwNone_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwNone_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwNone_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwNone_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwNone_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwNone_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwNone_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwNone_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwNone_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwCSTab_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwCSTab_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwCSTab_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwCSTab_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwCSTab_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwCSTab_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwCSTab_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwCSTab_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwCSTab_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEw_VdwCSTab_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwLJSh_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwLJSh_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwLJSh_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwLJSh_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwNone_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwNone_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwNone_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwNone_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwNone_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwNone_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwNone_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwNone_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwNone_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSh_VdwNone_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwLJSw_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwLJSw_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwNone_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwNone_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwNone_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwNone_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwNone_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwNone_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwNone_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwNone_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwNone_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecEwSw_VdwNone_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwLJ_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwLJ_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwLJ_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwLJ_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwLJ_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwLJ_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwLJ_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwLJ_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwLJ_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwLJ_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwNone_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwNone_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwNone_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwNone_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwNone_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwNone_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwNone_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwNone_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwNone_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwNone_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwCSTab_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwCSTab_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwCSTab_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwCSTab_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwLJ_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwLJ_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwLJ_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwLJ_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwLJ_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwLJ_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwLJ_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwLJ_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwLJ_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwLJ_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwNone_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwNone_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwNone_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwNone_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwNone_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwNone_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwNone_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwNone_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwNone_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwNone_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwCSTab_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwCSTab_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwCSTab_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwCSTab_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwCSTab_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwCSTab_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwCSTab_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwCSTab_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwCSTab_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecCSTab_VdwCSTab_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecGB_VdwLJ_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecGB_VdwLJ_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecGB_VdwNone_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecGB_VdwNone_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecGB_VdwCSTab_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecGB_VdwCSTab_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSh_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSh_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSh_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSh_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSh_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSh_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSh_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSh_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSw_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSw_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSw_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSw_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSw_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSw_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSw_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSw_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSw_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwLJSw_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwNone_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwNone_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwNone_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwNone_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwNone_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwNone_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwNone_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwNone_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwNone_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwNone_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwCSTab_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwCSTab_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwCSTab_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwCSTab_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwLJ_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwLJ_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwLJ_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwLJ_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwLJ_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwLJ_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwLJ_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwLJ_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwLJ_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwLJ_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwNone_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwNone_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwNone_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwNone_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwNone_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwNone_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwNone_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwNone_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwNone_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwNone_GeomW4W4_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwCSTab_GeomP1P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwCSTab_GeomP1P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwCSTab_GeomW3P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwCSTab_GeomW3P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwCSTab_GeomW3W3_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwCSTab_GeomW3W3_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwCSTab_GeomW4P1_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwCSTab_GeomW4P1_F_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwCSTab_GeomW4W4_VF_avx_256_single;
+nb_kernel_t nb_kernel_ElecRF_VdwCSTab_GeomW4W4_F_avx_256_single;
+
+
+nb_kernel_info_t
+kernellist_avx_256_single[] =
+{
+ { nb_kernel_ElecNone_VdwLJ_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecNone_VdwLJ_GeomP1P1_VF_avx_256_single", "avx_256_single", "None", "None", "LennardJones", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecNone_VdwLJ_GeomP1P1_F_avx_256_single, "nb_kernel_ElecNone_VdwLJ_GeomP1P1_F_avx_256_single", "avx_256_single", "None", "None", "LennardJones", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecNone_VdwLJSh_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecNone_VdwLJSh_GeomP1P1_VF_avx_256_single", "avx_256_single", "None", "None", "LennardJones", "PotentialShift", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecNone_VdwLJSh_GeomP1P1_F_avx_256_single, "nb_kernel_ElecNone_VdwLJSh_GeomP1P1_F_avx_256_single", "avx_256_single", "None", "None", "LennardJones", "PotentialShift", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecNone_VdwLJSw_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecNone_VdwLJSw_GeomP1P1_VF_avx_256_single", "avx_256_single", "None", "None", "LennardJones", "PotentialSwitch", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecNone_VdwLJSw_GeomP1P1_F_avx_256_single, "nb_kernel_ElecNone_VdwLJSw_GeomP1P1_F_avx_256_single", "avx_256_single", "None", "None", "LennardJones", "PotentialSwitch", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecNone_VdwCSTab_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecNone_VdwCSTab_GeomP1P1_VF_avx_256_single", "avx_256_single", "None", "None", "CubicSplineTable", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecNone_VdwCSTab_GeomP1P1_F_avx_256_single, "nb_kernel_ElecNone_VdwCSTab_GeomP1P1_F_avx_256_single", "avx_256_single", "None", "None", "CubicSplineTable", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecEw_VdwLJ_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecEw_VdwLJ_GeomP1P1_VF_avx_256_single", "avx_256_single", "Ewald", "None", "LennardJones", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEw_VdwLJ_GeomP1P1_F_avx_256_single, "nb_kernel_ElecEw_VdwLJ_GeomP1P1_F_avx_256_single", "avx_256_single", "Ewald", "None", "LennardJones", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecEw_VdwLJ_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecEw_VdwLJ_GeomW3P1_VF_avx_256_single", "avx_256_single", "Ewald", "None", "LennardJones", "None", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEw_VdwLJ_GeomW3P1_F_avx_256_single, "nb_kernel_ElecEw_VdwLJ_GeomW3P1_F_avx_256_single", "avx_256_single", "Ewald", "None", "LennardJones", "None", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecEw_VdwLJ_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecEw_VdwLJ_GeomW3W3_VF_avx_256_single", "avx_256_single", "Ewald", "None", "LennardJones", "None", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecEw_VdwLJ_GeomW3W3_F_avx_256_single, "nb_kernel_ElecEw_VdwLJ_GeomW3W3_F_avx_256_single", "avx_256_single", "Ewald", "None", "LennardJones", "None", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecEw_VdwLJ_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecEw_VdwLJ_GeomW4P1_VF_avx_256_single", "avx_256_single", "Ewald", "None", "LennardJones", "None", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEw_VdwLJ_GeomW4P1_F_avx_256_single, "nb_kernel_ElecEw_VdwLJ_GeomW4P1_F_avx_256_single", "avx_256_single", "Ewald", "None", "LennardJones", "None", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecEw_VdwLJ_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecEw_VdwLJ_GeomW4W4_VF_avx_256_single", "avx_256_single", "Ewald", "None", "LennardJones", "None", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecEw_VdwLJ_GeomW4W4_F_avx_256_single, "nb_kernel_ElecEw_VdwLJ_GeomW4W4_F_avx_256_single", "avx_256_single", "Ewald", "None", "LennardJones", "None", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecEw_VdwNone_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecEw_VdwNone_GeomP1P1_VF_avx_256_single", "avx_256_single", "Ewald", "None", "None", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEw_VdwNone_GeomP1P1_F_avx_256_single, "nb_kernel_ElecEw_VdwNone_GeomP1P1_F_avx_256_single", "avx_256_single", "Ewald", "None", "None", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecEw_VdwNone_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecEw_VdwNone_GeomW3P1_VF_avx_256_single", "avx_256_single", "Ewald", "None", "None", "None", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEw_VdwNone_GeomW3P1_F_avx_256_single, "nb_kernel_ElecEw_VdwNone_GeomW3P1_F_avx_256_single", "avx_256_single", "Ewald", "None", "None", "None", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecEw_VdwNone_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecEw_VdwNone_GeomW3W3_VF_avx_256_single", "avx_256_single", "Ewald", "None", "None", "None", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecEw_VdwNone_GeomW3W3_F_avx_256_single, "nb_kernel_ElecEw_VdwNone_GeomW3W3_F_avx_256_single", "avx_256_single", "Ewald", "None", "None", "None", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecEw_VdwNone_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecEw_VdwNone_GeomW4P1_VF_avx_256_single", "avx_256_single", "Ewald", "None", "None", "None", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEw_VdwNone_GeomW4P1_F_avx_256_single, "nb_kernel_ElecEw_VdwNone_GeomW4P1_F_avx_256_single", "avx_256_single", "Ewald", "None", "None", "None", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecEw_VdwNone_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecEw_VdwNone_GeomW4W4_VF_avx_256_single", "avx_256_single", "Ewald", "None", "None", "None", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecEw_VdwNone_GeomW4W4_F_avx_256_single, "nb_kernel_ElecEw_VdwNone_GeomW4W4_F_avx_256_single", "avx_256_single", "Ewald", "None", "None", "None", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecEw_VdwCSTab_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecEw_VdwCSTab_GeomP1P1_VF_avx_256_single", "avx_256_single", "Ewald", "None", "CubicSplineTable", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEw_VdwCSTab_GeomP1P1_F_avx_256_single, "nb_kernel_ElecEw_VdwCSTab_GeomP1P1_F_avx_256_single", "avx_256_single", "Ewald", "None", "CubicSplineTable", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecEw_VdwCSTab_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecEw_VdwCSTab_GeomW3P1_VF_avx_256_single", "avx_256_single", "Ewald", "None", "CubicSplineTable", "None", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEw_VdwCSTab_GeomW3P1_F_avx_256_single, "nb_kernel_ElecEw_VdwCSTab_GeomW3P1_F_avx_256_single", "avx_256_single", "Ewald", "None", "CubicSplineTable", "None", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecEw_VdwCSTab_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecEw_VdwCSTab_GeomW3W3_VF_avx_256_single", "avx_256_single", "Ewald", "None", "CubicSplineTable", "None", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecEw_VdwCSTab_GeomW3W3_F_avx_256_single, "nb_kernel_ElecEw_VdwCSTab_GeomW3W3_F_avx_256_single", "avx_256_single", "Ewald", "None", "CubicSplineTable", "None", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecEw_VdwCSTab_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecEw_VdwCSTab_GeomW4P1_VF_avx_256_single", "avx_256_single", "Ewald", "None", "CubicSplineTable", "None", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEw_VdwCSTab_GeomW4P1_F_avx_256_single, "nb_kernel_ElecEw_VdwCSTab_GeomW4P1_F_avx_256_single", "avx_256_single", "Ewald", "None", "CubicSplineTable", "None", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecEw_VdwCSTab_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecEw_VdwCSTab_GeomW4W4_VF_avx_256_single", "avx_256_single", "Ewald", "None", "CubicSplineTable", "None", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecEw_VdwCSTab_GeomW4W4_F_avx_256_single, "nb_kernel_ElecEw_VdwCSTab_GeomW4W4_F_avx_256_single", "avx_256_single", "Ewald", "None", "CubicSplineTable", "None", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecEwSh_VdwLJSh_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecEwSh_VdwLJSh_GeomP1P1_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "LennardJones", "PotentialShift", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSh_VdwLJSh_GeomP1P1_F_avx_256_single, "nb_kernel_ElecEwSh_VdwLJSh_GeomP1P1_F_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "LennardJones", "PotentialShift", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "LennardJones", "PotentialShift", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_F_avx_256_single, "nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_F_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "LennardJones", "PotentialShift", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "LennardJones", "PotentialShift", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_F_avx_256_single, "nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_F_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "LennardJones", "PotentialShift", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecEwSh_VdwLJSh_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecEwSh_VdwLJSh_GeomW4P1_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "LennardJones", "PotentialShift", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSh_VdwLJSh_GeomW4P1_F_avx_256_single, "nb_kernel_ElecEwSh_VdwLJSh_GeomW4P1_F_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "LennardJones", "PotentialShift", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "LennardJones", "PotentialShift", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_F_avx_256_single, "nb_kernel_ElecEwSh_VdwLJSh_GeomW4W4_F_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "LennardJones", "PotentialShift", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecEwSh_VdwNone_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecEwSh_VdwNone_GeomP1P1_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "None", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSh_VdwNone_GeomP1P1_F_avx_256_single, "nb_kernel_ElecEwSh_VdwNone_GeomP1P1_F_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "None", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecEwSh_VdwNone_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecEwSh_VdwNone_GeomW3P1_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "None", "None", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSh_VdwNone_GeomW3P1_F_avx_256_single, "nb_kernel_ElecEwSh_VdwNone_GeomW3P1_F_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "None", "None", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecEwSh_VdwNone_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecEwSh_VdwNone_GeomW3W3_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "None", "None", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSh_VdwNone_GeomW3W3_F_avx_256_single, "nb_kernel_ElecEwSh_VdwNone_GeomW3W3_F_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "None", "None", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecEwSh_VdwNone_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecEwSh_VdwNone_GeomW4P1_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "None", "None", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSh_VdwNone_GeomW4P1_F_avx_256_single, "nb_kernel_ElecEwSh_VdwNone_GeomW4P1_F_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "None", "None", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecEwSh_VdwNone_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecEwSh_VdwNone_GeomW4W4_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "None", "None", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSh_VdwNone_GeomW4W4_F_avx_256_single, "nb_kernel_ElecEwSh_VdwNone_GeomW4W4_F_avx_256_single", "avx_256_single", "Ewald", "PotentialShift", "None", "None", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecEwSw_VdwLJSw_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecEwSw_VdwLJSw_GeomP1P1_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "LennardJones", "PotentialSwitch", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSw_VdwLJSw_GeomP1P1_F_avx_256_single, "nb_kernel_ElecEwSw_VdwLJSw_GeomP1P1_F_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "LennardJones", "PotentialSwitch", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "LennardJones", "PotentialSwitch", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_F_avx_256_single, "nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_F_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "LennardJones", "PotentialSwitch", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "LennardJones", "PotentialSwitch", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_F_avx_256_single, "nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_F_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "LennardJones", "PotentialSwitch", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "LennardJones", "PotentialSwitch", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_F_avx_256_single, "nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_F_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "LennardJones", "PotentialSwitch", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "LennardJones", "PotentialSwitch", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_F_avx_256_single, "nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_F_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "LennardJones", "PotentialSwitch", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecEwSw_VdwNone_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecEwSw_VdwNone_GeomP1P1_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "None", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSw_VdwNone_GeomP1P1_F_avx_256_single, "nb_kernel_ElecEwSw_VdwNone_GeomP1P1_F_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "None", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecEwSw_VdwNone_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecEwSw_VdwNone_GeomW3P1_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "None", "None", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSw_VdwNone_GeomW3P1_F_avx_256_single, "nb_kernel_ElecEwSw_VdwNone_GeomW3P1_F_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "None", "None", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecEwSw_VdwNone_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecEwSw_VdwNone_GeomW3W3_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "None", "None", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSw_VdwNone_GeomW3W3_F_avx_256_single, "nb_kernel_ElecEwSw_VdwNone_GeomW3W3_F_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "None", "None", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecEwSw_VdwNone_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecEwSw_VdwNone_GeomW4P1_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "None", "None", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSw_VdwNone_GeomW4P1_F_avx_256_single, "nb_kernel_ElecEwSw_VdwNone_GeomW4P1_F_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "None", "None", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecEwSw_VdwNone_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecEwSw_VdwNone_GeomW4W4_VF_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "None", "None", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecEwSw_VdwNone_GeomW4W4_F_avx_256_single, "nb_kernel_ElecEwSw_VdwNone_GeomW4W4_F_avx_256_single", "avx_256_single", "Ewald", "PotentialSwitch", "None", "None", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecCoul_VdwLJ_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecCoul_VdwLJ_GeomP1P1_VF_avx_256_single", "avx_256_single", "Coulomb", "None", "LennardJones", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecCoul_VdwLJ_GeomP1P1_F_avx_256_single, "nb_kernel_ElecCoul_VdwLJ_GeomP1P1_F_avx_256_single", "avx_256_single", "Coulomb", "None", "LennardJones", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecCoul_VdwLJ_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecCoul_VdwLJ_GeomW3P1_VF_avx_256_single", "avx_256_single", "Coulomb", "None", "LennardJones", "None", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecCoul_VdwLJ_GeomW3P1_F_avx_256_single, "nb_kernel_ElecCoul_VdwLJ_GeomW3P1_F_avx_256_single", "avx_256_single", "Coulomb", "None", "LennardJones", "None", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecCoul_VdwLJ_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecCoul_VdwLJ_GeomW3W3_VF_avx_256_single", "avx_256_single", "Coulomb", "None", "LennardJones", "None", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecCoul_VdwLJ_GeomW3W3_F_avx_256_single, "nb_kernel_ElecCoul_VdwLJ_GeomW3W3_F_avx_256_single", "avx_256_single", "Coulomb", "None", "LennardJones", "None", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecCoul_VdwLJ_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecCoul_VdwLJ_GeomW4P1_VF_avx_256_single", "avx_256_single", "Coulomb", "None", "LennardJones", "None", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecCoul_VdwLJ_GeomW4P1_F_avx_256_single, "nb_kernel_ElecCoul_VdwLJ_GeomW4P1_F_avx_256_single", "avx_256_single", "Coulomb", "None", "LennardJones", "None", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecCoul_VdwLJ_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecCoul_VdwLJ_GeomW4W4_VF_avx_256_single", "avx_256_single", "Coulomb", "None", "LennardJones", "None", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecCoul_VdwLJ_GeomW4W4_F_avx_256_single, "nb_kernel_ElecCoul_VdwLJ_GeomW4W4_F_avx_256_single", "avx_256_single", "Coulomb", "None", "LennardJones", "None", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecCoul_VdwNone_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecCoul_VdwNone_GeomP1P1_VF_avx_256_single", "avx_256_single", "Coulomb", "None", "None", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecCoul_VdwNone_GeomP1P1_F_avx_256_single, "nb_kernel_ElecCoul_VdwNone_GeomP1P1_F_avx_256_single", "avx_256_single", "Coulomb", "None", "None", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecCoul_VdwNone_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecCoul_VdwNone_GeomW3P1_VF_avx_256_single", "avx_256_single", "Coulomb", "None", "None", "None", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecCoul_VdwNone_GeomW3P1_F_avx_256_single, "nb_kernel_ElecCoul_VdwNone_GeomW3P1_F_avx_256_single", "avx_256_single", "Coulomb", "None", "None", "None", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecCoul_VdwNone_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecCoul_VdwNone_GeomW3W3_VF_avx_256_single", "avx_256_single", "Coulomb", "None", "None", "None", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecCoul_VdwNone_GeomW3W3_F_avx_256_single, "nb_kernel_ElecCoul_VdwNone_GeomW3W3_F_avx_256_single", "avx_256_single", "Coulomb", "None", "None", "None", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecCoul_VdwNone_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecCoul_VdwNone_GeomW4P1_VF_avx_256_single", "avx_256_single", "Coulomb", "None", "None", "None", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecCoul_VdwNone_GeomW4P1_F_avx_256_single, "nb_kernel_ElecCoul_VdwNone_GeomW4P1_F_avx_256_single", "avx_256_single", "Coulomb", "None", "None", "None", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecCoul_VdwNone_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecCoul_VdwNone_GeomW4W4_VF_avx_256_single", "avx_256_single", "Coulomb", "None", "None", "None", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecCoul_VdwNone_GeomW4W4_F_avx_256_single, "nb_kernel_ElecCoul_VdwNone_GeomW4W4_F_avx_256_single", "avx_256_single", "Coulomb", "None", "None", "None", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_VF_avx_256_single", "avx_256_single", "Coulomb", "None", "CubicSplineTable", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_F_avx_256_single, "nb_kernel_ElecCoul_VdwCSTab_GeomP1P1_F_avx_256_single", "avx_256_single", "Coulomb", "None", "CubicSplineTable", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecCoul_VdwCSTab_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecCoul_VdwCSTab_GeomW3P1_VF_avx_256_single", "avx_256_single", "Coulomb", "None", "CubicSplineTable", "None", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecCoul_VdwCSTab_GeomW3P1_F_avx_256_single, "nb_kernel_ElecCoul_VdwCSTab_GeomW3P1_F_avx_256_single", "avx_256_single", "Coulomb", "None", "CubicSplineTable", "None", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_VF_avx_256_single", "avx_256_single", "Coulomb", "None", "CubicSplineTable", "None", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_F_avx_256_single, "nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_F_avx_256_single", "avx_256_single", "Coulomb", "None", "CubicSplineTable", "None", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecCoul_VdwCSTab_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecCoul_VdwCSTab_GeomW4P1_VF_avx_256_single", "avx_256_single", "Coulomb", "None", "CubicSplineTable", "None", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecCoul_VdwCSTab_GeomW4P1_F_avx_256_single, "nb_kernel_ElecCoul_VdwCSTab_GeomW4P1_F_avx_256_single", "avx_256_single", "Coulomb", "None", "CubicSplineTable", "None", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_VF_avx_256_single", "avx_256_single", "Coulomb", "None", "CubicSplineTable", "None", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_F_avx_256_single, "nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_F_avx_256_single", "avx_256_single", "Coulomb", "None", "CubicSplineTable", "None", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecCSTab_VdwLJ_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecCSTab_VdwLJ_GeomP1P1_VF_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "LennardJones", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecCSTab_VdwLJ_GeomP1P1_F_avx_256_single, "nb_kernel_ElecCSTab_VdwLJ_GeomP1P1_F_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "LennardJones", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecCSTab_VdwLJ_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecCSTab_VdwLJ_GeomW3P1_VF_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "LennardJones", "None", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecCSTab_VdwLJ_GeomW3P1_F_avx_256_single, "nb_kernel_ElecCSTab_VdwLJ_GeomW3P1_F_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "LennardJones", "None", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecCSTab_VdwLJ_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecCSTab_VdwLJ_GeomW3W3_VF_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "LennardJones", "None", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecCSTab_VdwLJ_GeomW3W3_F_avx_256_single, "nb_kernel_ElecCSTab_VdwLJ_GeomW3W3_F_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "LennardJones", "None", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecCSTab_VdwLJ_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecCSTab_VdwLJ_GeomW4P1_VF_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "LennardJones", "None", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecCSTab_VdwLJ_GeomW4P1_F_avx_256_single, "nb_kernel_ElecCSTab_VdwLJ_GeomW4P1_F_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "LennardJones", "None", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecCSTab_VdwLJ_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecCSTab_VdwLJ_GeomW4W4_VF_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "LennardJones", "None", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecCSTab_VdwLJ_GeomW4W4_F_avx_256_single, "nb_kernel_ElecCSTab_VdwLJ_GeomW4W4_F_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "LennardJones", "None", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecCSTab_VdwNone_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecCSTab_VdwNone_GeomP1P1_VF_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "None", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecCSTab_VdwNone_GeomP1P1_F_avx_256_single, "nb_kernel_ElecCSTab_VdwNone_GeomP1P1_F_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "None", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecCSTab_VdwNone_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecCSTab_VdwNone_GeomW3P1_VF_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "None", "None", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecCSTab_VdwNone_GeomW3P1_F_avx_256_single, "nb_kernel_ElecCSTab_VdwNone_GeomW3P1_F_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "None", "None", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecCSTab_VdwNone_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecCSTab_VdwNone_GeomW3W3_VF_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "None", "None", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecCSTab_VdwNone_GeomW3W3_F_avx_256_single, "nb_kernel_ElecCSTab_VdwNone_GeomW3W3_F_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "None", "None", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecCSTab_VdwNone_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecCSTab_VdwNone_GeomW4P1_VF_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "None", "None", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecCSTab_VdwNone_GeomW4P1_F_avx_256_single, "nb_kernel_ElecCSTab_VdwNone_GeomW4P1_F_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "None", "None", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecCSTab_VdwNone_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecCSTab_VdwNone_GeomW4W4_VF_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "None", "None", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecCSTab_VdwNone_GeomW4W4_F_avx_256_single, "nb_kernel_ElecCSTab_VdwNone_GeomW4W4_F_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "None", "None", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecCSTab_VdwCSTab_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecCSTab_VdwCSTab_GeomP1P1_VF_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "CubicSplineTable", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecCSTab_VdwCSTab_GeomP1P1_F_avx_256_single, "nb_kernel_ElecCSTab_VdwCSTab_GeomP1P1_F_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "CubicSplineTable", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecCSTab_VdwCSTab_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecCSTab_VdwCSTab_GeomW3P1_VF_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "CubicSplineTable", "None", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecCSTab_VdwCSTab_GeomW3P1_F_avx_256_single, "nb_kernel_ElecCSTab_VdwCSTab_GeomW3P1_F_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "CubicSplineTable", "None", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecCSTab_VdwCSTab_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecCSTab_VdwCSTab_GeomW3W3_VF_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "CubicSplineTable", "None", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecCSTab_VdwCSTab_GeomW3W3_F_avx_256_single, "nb_kernel_ElecCSTab_VdwCSTab_GeomW3W3_F_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "CubicSplineTable", "None", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecCSTab_VdwCSTab_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecCSTab_VdwCSTab_GeomW4P1_VF_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "CubicSplineTable", "None", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecCSTab_VdwCSTab_GeomW4P1_F_avx_256_single, "nb_kernel_ElecCSTab_VdwCSTab_GeomW4P1_F_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "CubicSplineTable", "None", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecCSTab_VdwCSTab_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecCSTab_VdwCSTab_GeomW4W4_VF_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "CubicSplineTable", "None", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecCSTab_VdwCSTab_GeomW4W4_F_avx_256_single, "nb_kernel_ElecCSTab_VdwCSTab_GeomW4W4_F_avx_256_single", "avx_256_single", "CubicSplineTable", "None", "CubicSplineTable", "None", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecGB_VdwLJ_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecGB_VdwLJ_GeomP1P1_VF_avx_256_single", "avx_256_single", "GeneralizedBorn", "None", "LennardJones", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecGB_VdwLJ_GeomP1P1_F_avx_256_single, "nb_kernel_ElecGB_VdwLJ_GeomP1P1_F_avx_256_single", "avx_256_single", "GeneralizedBorn", "None", "LennardJones", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecGB_VdwNone_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecGB_VdwNone_GeomP1P1_VF_avx_256_single", "avx_256_single", "GeneralizedBorn", "None", "None", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecGB_VdwNone_GeomP1P1_F_avx_256_single, "nb_kernel_ElecGB_VdwNone_GeomP1P1_F_avx_256_single", "avx_256_single", "GeneralizedBorn", "None", "None", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecGB_VdwCSTab_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecGB_VdwCSTab_GeomP1P1_VF_avx_256_single", "avx_256_single", "GeneralizedBorn", "None", "CubicSplineTable", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecGB_VdwCSTab_GeomP1P1_F_avx_256_single, "nb_kernel_ElecGB_VdwCSTab_GeomP1P1_F_avx_256_single", "avx_256_single", "GeneralizedBorn", "None", "CubicSplineTable", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwLJSh_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSh_GeomP1P1_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialShift", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwLJSh_GeomP1P1_F_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSh_GeomP1P1_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialShift", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwLJSh_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSh_GeomW3P1_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialShift", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwLJSh_GeomW3P1_F_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSh_GeomW3P1_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialShift", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialShift", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_F_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialShift", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwLJSh_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSh_GeomW4P1_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialShift", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwLJSh_GeomW4P1_F_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSh_GeomW4P1_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialShift", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwLJSh_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSh_GeomW4W4_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialShift", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwLJSh_GeomW4W4_F_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSh_GeomW4W4_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialShift", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwLJSw_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSw_GeomP1P1_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialSwitch", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwLJSw_GeomP1P1_F_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSw_GeomP1P1_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialSwitch", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwLJSw_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSw_GeomW3P1_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialSwitch", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwLJSw_GeomW3P1_F_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSw_GeomW3P1_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialSwitch", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwLJSw_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSw_GeomW3W3_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialSwitch", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwLJSw_GeomW3W3_F_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSw_GeomW3W3_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialSwitch", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwLJSw_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSw_GeomW4P1_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialSwitch", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwLJSw_GeomW4P1_F_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSw_GeomW4P1_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialSwitch", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwLJSw_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSw_GeomW4W4_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialSwitch", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwLJSw_GeomW4W4_F_avx_256_single, "nb_kernel_ElecRFCut_VdwLJSw_GeomW4W4_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "LennardJones", "PotentialSwitch", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwNone_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwNone_GeomP1P1_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "None", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwNone_GeomP1P1_F_avx_256_single, "nb_kernel_ElecRFCut_VdwNone_GeomP1P1_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "None", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwNone_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwNone_GeomW3P1_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "None", "None", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwNone_GeomW3P1_F_avx_256_single, "nb_kernel_ElecRFCut_VdwNone_GeomW3P1_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "None", "None", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwNone_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwNone_GeomW3W3_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "None", "None", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwNone_GeomW3W3_F_avx_256_single, "nb_kernel_ElecRFCut_VdwNone_GeomW3W3_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "None", "None", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwNone_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwNone_GeomW4P1_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "None", "None", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwNone_GeomW4P1_F_avx_256_single, "nb_kernel_ElecRFCut_VdwNone_GeomW4P1_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "None", "None", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwNone_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwNone_GeomW4W4_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "None", "None", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwNone_GeomW4W4_F_avx_256_single, "nb_kernel_ElecRFCut_VdwNone_GeomW4W4_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "None", "None", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "CubicSplineTable", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_F_avx_256_single, "nb_kernel_ElecRFCut_VdwCSTab_GeomP1P1_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "CubicSplineTable", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwCSTab_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwCSTab_GeomW3P1_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "CubicSplineTable", "None", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwCSTab_GeomW3P1_F_avx_256_single, "nb_kernel_ElecRFCut_VdwCSTab_GeomW3P1_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "CubicSplineTable", "None", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "CubicSplineTable", "None", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_F_avx_256_single, "nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "CubicSplineTable", "None", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwCSTab_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwCSTab_GeomW4P1_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "CubicSplineTable", "None", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwCSTab_GeomW4P1_F_avx_256_single, "nb_kernel_ElecRFCut_VdwCSTab_GeomW4P1_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "CubicSplineTable", "None", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_VF_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "CubicSplineTable", "None", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_F_avx_256_single, "nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_F_avx_256_single", "avx_256_single", "ReactionField", "ExactCutoff", "CubicSplineTable", "None", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecRF_VdwLJ_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecRF_VdwLJ_GeomP1P1_VF_avx_256_single", "avx_256_single", "ReactionField", "None", "LennardJones", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRF_VdwLJ_GeomP1P1_F_avx_256_single, "nb_kernel_ElecRF_VdwLJ_GeomP1P1_F_avx_256_single", "avx_256_single", "ReactionField", "None", "LennardJones", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecRF_VdwLJ_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecRF_VdwLJ_GeomW3P1_VF_avx_256_single", "avx_256_single", "ReactionField", "None", "LennardJones", "None", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRF_VdwLJ_GeomW3P1_F_avx_256_single, "nb_kernel_ElecRF_VdwLJ_GeomW3P1_F_avx_256_single", "avx_256_single", "ReactionField", "None", "LennardJones", "None", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecRF_VdwLJ_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecRF_VdwLJ_GeomW3W3_VF_avx_256_single", "avx_256_single", "ReactionField", "None", "LennardJones", "None", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecRF_VdwLJ_GeomW3W3_F_avx_256_single, "nb_kernel_ElecRF_VdwLJ_GeomW3W3_F_avx_256_single", "avx_256_single", "ReactionField", "None", "LennardJones", "None", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecRF_VdwLJ_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecRF_VdwLJ_GeomW4P1_VF_avx_256_single", "avx_256_single", "ReactionField", "None", "LennardJones", "None", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRF_VdwLJ_GeomW4P1_F_avx_256_single, "nb_kernel_ElecRF_VdwLJ_GeomW4P1_F_avx_256_single", "avx_256_single", "ReactionField", "None", "LennardJones", "None", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecRF_VdwLJ_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecRF_VdwLJ_GeomW4W4_VF_avx_256_single", "avx_256_single", "ReactionField", "None", "LennardJones", "None", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecRF_VdwLJ_GeomW4W4_F_avx_256_single, "nb_kernel_ElecRF_VdwLJ_GeomW4W4_F_avx_256_single", "avx_256_single", "ReactionField", "None", "LennardJones", "None", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecRF_VdwNone_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecRF_VdwNone_GeomP1P1_VF_avx_256_single", "avx_256_single", "ReactionField", "None", "None", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRF_VdwNone_GeomP1P1_F_avx_256_single, "nb_kernel_ElecRF_VdwNone_GeomP1P1_F_avx_256_single", "avx_256_single", "ReactionField", "None", "None", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecRF_VdwNone_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecRF_VdwNone_GeomW3P1_VF_avx_256_single", "avx_256_single", "ReactionField", "None", "None", "None", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRF_VdwNone_GeomW3P1_F_avx_256_single, "nb_kernel_ElecRF_VdwNone_GeomW3P1_F_avx_256_single", "avx_256_single", "ReactionField", "None", "None", "None", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecRF_VdwNone_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecRF_VdwNone_GeomW3W3_VF_avx_256_single", "avx_256_single", "ReactionField", "None", "None", "None", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecRF_VdwNone_GeomW3W3_F_avx_256_single, "nb_kernel_ElecRF_VdwNone_GeomW3W3_F_avx_256_single", "avx_256_single", "ReactionField", "None", "None", "None", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecRF_VdwNone_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecRF_VdwNone_GeomW4P1_VF_avx_256_single", "avx_256_single", "ReactionField", "None", "None", "None", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRF_VdwNone_GeomW4P1_F_avx_256_single, "nb_kernel_ElecRF_VdwNone_GeomW4P1_F_avx_256_single", "avx_256_single", "ReactionField", "None", "None", "None", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecRF_VdwNone_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecRF_VdwNone_GeomW4W4_VF_avx_256_single", "avx_256_single", "ReactionField", "None", "None", "None", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecRF_VdwNone_GeomW4W4_F_avx_256_single, "nb_kernel_ElecRF_VdwNone_GeomW4W4_F_avx_256_single", "avx_256_single", "ReactionField", "None", "None", "None", "Water4Water4", "", "Force" },
+ { nb_kernel_ElecRF_VdwCSTab_GeomP1P1_VF_avx_256_single, "nb_kernel_ElecRF_VdwCSTab_GeomP1P1_VF_avx_256_single", "avx_256_single", "ReactionField", "None", "CubicSplineTable", "None", "ParticleParticle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRF_VdwCSTab_GeomP1P1_F_avx_256_single, "nb_kernel_ElecRF_VdwCSTab_GeomP1P1_F_avx_256_single", "avx_256_single", "ReactionField", "None", "CubicSplineTable", "None", "ParticleParticle", "", "Force" },
+ { nb_kernel_ElecRF_VdwCSTab_GeomW3P1_VF_avx_256_single, "nb_kernel_ElecRF_VdwCSTab_GeomW3P1_VF_avx_256_single", "avx_256_single", "ReactionField", "None", "CubicSplineTable", "None", "Water3Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRF_VdwCSTab_GeomW3P1_F_avx_256_single, "nb_kernel_ElecRF_VdwCSTab_GeomW3P1_F_avx_256_single", "avx_256_single", "ReactionField", "None", "CubicSplineTable", "None", "Water3Particle", "", "Force" },
+ { nb_kernel_ElecRF_VdwCSTab_GeomW3W3_VF_avx_256_single, "nb_kernel_ElecRF_VdwCSTab_GeomW3W3_VF_avx_256_single", "avx_256_single", "ReactionField", "None", "CubicSplineTable", "None", "Water3Water3", "", "PotentialAndForce" },
+ { nb_kernel_ElecRF_VdwCSTab_GeomW3W3_F_avx_256_single, "nb_kernel_ElecRF_VdwCSTab_GeomW3W3_F_avx_256_single", "avx_256_single", "ReactionField", "None", "CubicSplineTable", "None", "Water3Water3", "", "Force" },
+ { nb_kernel_ElecRF_VdwCSTab_GeomW4P1_VF_avx_256_single, "nb_kernel_ElecRF_VdwCSTab_GeomW4P1_VF_avx_256_single", "avx_256_single", "ReactionField", "None", "CubicSplineTable", "None", "Water4Particle", "", "PotentialAndForce" },
+ { nb_kernel_ElecRF_VdwCSTab_GeomW4P1_F_avx_256_single, "nb_kernel_ElecRF_VdwCSTab_GeomW4P1_F_avx_256_single", "avx_256_single", "ReactionField", "None", "CubicSplineTable", "None", "Water4Particle", "", "Force" },
+ { nb_kernel_ElecRF_VdwCSTab_GeomW4W4_VF_avx_256_single, "nb_kernel_ElecRF_VdwCSTab_GeomW4W4_VF_avx_256_single", "avx_256_single", "ReactionField", "None", "CubicSplineTable", "None", "Water4Water4", "", "PotentialAndForce" },
+ { nb_kernel_ElecRF_VdwCSTab_GeomW4W4_F_avx_256_single, "nb_kernel_ElecRF_VdwCSTab_GeomW4W4_F_avx_256_single", "avx_256_single", "ReactionField", "None", "CubicSplineTable", "None", "Water4Water4", "", "Force" }
+};
+
+/* Number of kernels in kernellist_avx_256_single */
+int
+kernellist_avx_256_single_size = sizeof(kernellist_avx_256_single)/sizeof(kernellist_avx_256_single[0]);
+
+#endif
--- /dev/null
+/*
+ * Note: this file was generated by the Gromacs c kernel generator.
+ *
+ * This source code is part of
+ *
+ * G R O M A C S
+ *
+ * Copyright (c) 2001-2012, The GROMACS Development Team
+ *
+ * Gromacs is a library for molecular simulation and trajectory analysis,
+ * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
+ * a full list of developers and information, check out http://www.gromacs.org
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * To help fund GROMACS development, we humbly ask that you cite
+ * the papers people have written on it - you can find them on the website.
+ */
+#ifndef nb_kernel_avx_256_single_h
+#define nb_kernel_avx_256_single_h
+
+#include "../nb_kernel.h"
+
+
+/* List of kernels for this architecture with metadata about them */
+extern nb_kernel_info_t
+kernellist_avx_256_single[];
+
+/* Number of entries in kernellist_avx_256_single */
+extern int
+kernellist_avx_256_single_size;
+
+#endif
--- /dev/null
+/* #if 0 */
+#error This file must be processed with the Gromacs pre-preprocessor
+/* #endif */
+/* #if INCLUDE_HEADER */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include "../nb_kernel.h"
+#include "types/simple.h"
+#include "vec.h"
+#include "nrnb.h"
+
+#include "gmx_math_x86_avx_256_single.h"
+#include "kernelutil_x86_avx_256_single.h"
+/* #endif */
+
+/* ## List of variables set by the generating script: */
+/* ## */
+/* ## Settings that apply to the entire kernel: */
+/* ## KERNEL_ELEC: String, choice for electrostatic interactions */
+/* ## KERNEL_VDW: String, choice for van der Waals interactions */
+/* ## KERNEL_NAME: String, name of this kernel */
+/* ## KERNEL_VF: String telling if we calculate potential, force, or both */
+/* ## GEOMETRY_I/GEOMETRY_J: String, name of each geometry, e.g. 'Water3' or '1Particle' */
+/* ## */
+/* ## Settings that apply to particles in the outer (I) or inner (J) loops: */
+/* ## PARTICLES_I[]/ Arrays with lists of i/j particles to use in kernel. It is */
+/* ## PARTICLES_J[]: just [0] for particle geometry, but can be longer for water */
+/* ## PARTICLES_ELEC_I[]/ Arrays with lists of i/j particle that have electrostatics */
+/* ## PARTICLES_ELEC_J[]: interactions that should be calculated in this kernel. */
+/* ## PARTICLES_VDW_I[]/ Arrays with the list of i/j particle that have VdW */
+/* ## PARTICLES_VDW_J[]: interactions that should be calculated in this kernel. */
+/* ## */
+/* ## Settings for pairs of interactions (e.g. 2nd i particle against 1st j particle) */
+/* ## PAIRS_IJ[]: Array with (i,j) tuples of pairs for which interactions */
+/* ## should be calculated in this kernel. Zero-charge particles */
+/* ## do not have interactions with particles without vdw, and */
+/* ## Vdw-only interactions are not evaluated in a no-vdw-kernel. */
+/* ## INTERACTION_FLAGS[][]: 2D matrix, dimension e.g. 3*3 for water-water interactions. */
+/* ## For each i-j pair, the element [I][J] is a list of strings */
+/* ## defining properties/flags of this interaction. Examples */
+/* ## include 'electrostatics'/'vdw' if that type of interaction */
+/* ## should be evaluated, 'rsq'/'rinv'/'rinvsq' if those values */
+/* ## are needed, and 'exactcutoff' or 'shift','switch' to */
+/* ## decide if the force/potential should be modified. This way */
+/* ## we only calculate values absolutely needed for each case. */
+
+/* ## Calculate the size and offset for (merged/interleaved) table data */
+
+/*
+ * Gromacs nonbonded kernel: {KERNEL_NAME}
+ * Electrostatics interaction: {KERNEL_ELEC}
+ * VdW interaction: {KERNEL_VDW}
+ * Geometry: {GEOMETRY_I}-{GEOMETRY_J}
+ * Calculate force/pot: {KERNEL_VF}
+ */
+void
+{KERNEL_NAME}
+ (t_nblist * gmx_restrict nlist,
+ rvec * gmx_restrict xx,
+ rvec * gmx_restrict ff,
+ t_forcerec * gmx_restrict fr,
+ t_mdatoms * gmx_restrict mdatoms,
+ nb_kernel_data_t * gmx_restrict kernel_data,
+ t_nrnb * gmx_restrict nrnb)
+{
+ /* ## Not all variables are used for all kernels, but any optimizing compiler fixes that, */
+ /* ## so there is no point in going to extremes to exclude variables that are not needed. */
+ /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
+ * just 0 for non-waters.
+ * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
+ * jnr indices corresponding to data put in the eight positions in the SIMD register.
+ */
+ int i_shift_offset,i_coord_offset,outeriter,inneriter;
+ int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
+ int jnrA,jnrB,jnrC,jnrD;
+ int jnrE,jnrF,jnrG,jnrH;
+ int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
+ int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
+ int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
+ int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
+ int *iinr,*jindex,*jjnr,*shiftidx,*gid;
+ real rcutoff_scalar;
+ real *shiftvec,*fshift,*x,*f;
+ real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
+ real scratch[4*DIM];
+ __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
+ /* #for I in PARTICLES_I */
+ real * vdwioffsetptr{I};
+ __m256 ix{I},iy{I},iz{I},fix{I},fiy{I},fiz{I},iq{I},isai{I};
+ /* #endfor */
+ /* #for J in PARTICLES_J */
+ int vdwjidx{J}A,vdwjidx{J}B,vdwjidx{J}C,vdwjidx{J}D,vdwjidx{J}E,vdwjidx{J}F,vdwjidx{J}G,vdwjidx{J}H;
+ __m256 jx{J},jy{J},jz{J},fjx{J},fjy{J},fjz{J},jq{J},isaj{J};
+ /* #endfor */
+ /* #for I,J in PAIRS_IJ */
+ __m256 dx{I}{J},dy{I}{J},dz{I}{J},rsq{I}{J},rinv{I}{J},rinvsq{I}{J},r{I}{J},qq{I}{J},c6_{I}{J},c12_{I}{J};
+ /* #endfor */
+ /* #if KERNEL_ELEC != 'None' */
+ __m256 velec,felec,velecsum,facel,crf,krf,krf2;
+ real *charge;
+ /* #endif */
+ /* #if 'GeneralizedBorn' in KERNEL_ELEC */
+ __m256i gbitab;
+ __m128i gbitab_lo,gbitab_hi;
+ __m256 vgb,fgb,vgbsum,dvdasum,gbscale,gbtabscale,isaprod,gbqqfactor,gbinvepsdiff,gbeps,dvdatmp;
+ __m256 minushalf = _mm256_set1_ps(-0.5);
+ real *invsqrta,*dvda,*gbtab;
+ /* #endif */
+ /* #if KERNEL_VDW != 'None' */
+ int nvdwtype;
+ __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
+ int *vdwtype;
+ real *vdwparam;
+ __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
+ __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
+ /* #endif */
+ /* #if 'Table' in KERNEL_ELEC or 'GeneralizedBorn' in KERNEL_ELEC or 'Table' in KERNEL_VDW */
+ __m256i vfitab;
+ __m128i vfitab_lo,vfitab_hi;
+ __m128i ifour = _mm_set1_epi32(4);
+ __m256 rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
+ real *vftab;
+ /* #endif */
+ /* #if 'Ewald' in KERNEL_ELEC */
+ __m256i ewitab;
+ __m128i ewitab_lo,ewitab_hi;
+ __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
+ __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
+ real *ewtab;
+ /* #endif */
+ /* #if 'PotentialSwitch' in [KERNEL_MOD_ELEC,KERNEL_MOD_VDW] */
+ __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
+ real rswitch_scalar,d_scalar;
+ /* #endif */
+ __m256 dummy_mask,cutoff_mask;
+ __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
+ __m256 one = _mm256_set1_ps(1.0);
+ __m256 two = _mm256_set1_ps(2.0);
+ x = xx[0];
+ f = ff[0];
+
+ nri = nlist->nri;
+ iinr = nlist->iinr;
+ jindex = nlist->jindex;
+ jjnr = nlist->jjnr;
+ shiftidx = nlist->shift;
+ gid = nlist->gid;
+ shiftvec = fr->shift_vec[0];
+ fshift = fr->fshift[0];
+ /* #if KERNEL_ELEC != 'None' */
+ facel = _mm256_set1_ps(fr->epsfac);
+ charge = mdatoms->chargeA;
+ /* #if 'ReactionField' in KERNEL_ELEC */
+ krf = _mm256_set1_ps(fr->ic->k_rf);
+ krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
+ crf = _mm256_set1_ps(fr->ic->c_rf);
+ /* #endif */
+ /* #endif */
+ /* #if KERNEL_VDW != 'None' */
+ nvdwtype = fr->ntype;
+ vdwparam = fr->nbfp;
+ vdwtype = mdatoms->typeA;
+ /* #endif */
+
+ /* #if 'Table' in KERNEL_ELEC and 'Table' in KERNEL_VDW */
+ vftab = kernel_data->table_elec_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec_vdw->scale);
+ /* #elif 'Table' in KERNEL_ELEC */
+ vftab = kernel_data->table_elec->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_elec->scale);
+ /* #elif 'Table' in KERNEL_VDW */
+ vftab = kernel_data->table_vdw->data;
+ vftabscale = _mm256_set1_ps(kernel_data->table_vdw->scale);
+ /* #endif */
+
+ /* #if 'Ewald' in KERNEL_ELEC */
+ sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
+ beta = _mm256_set1_ps(fr->ic->ewaldcoeff);
+ beta2 = _mm256_mul_ps(beta,beta);
+ beta3 = _mm256_mul_ps(beta,beta2);
+
+ /* #if KERNEL_VF=='Force' and KERNEL_MOD_ELEC!='PotentialSwitch' */
+ ewtab = fr->ic->tabq_coul_F;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+ /* #else */
+ ewtab = fr->ic->tabq_coul_FDV0;
+ ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
+ ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
+ /* #endif */
+ /* #endif */
+
+ /* #if KERNEL_ELEC=='GeneralizedBorn' */
+ invsqrta = fr->invsqrta;
+ dvda = fr->dvda;
+ gbtabscale = _mm256_set1_ps(fr->gbtab.scale);
+ gbtab = fr->gbtab.data;
+ gbinvepsdiff = _mm256_set1_ps((1.0/fr->epsilon_r) - (1.0/fr->gb_epsilon_solvent));
+ /* #endif */
+
+ /* #if 'Water' in GEOMETRY_I */
+ /* Setup water-specific parameters */
+ inr = nlist->iinr[0];
+ /* #for I in PARTICLES_ELEC_I */
+ iq{I} = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+{I}]));
+ /* #endfor */
+ /* #for I in PARTICLES_VDW_I */
+ vdwioffsetptr{I} = vdwparam+2*nvdwtype*vdwtype[inr+{I}];
+ /* #endfor */
+ /* #endif */
+
+ /* #if 'Water' in GEOMETRY_J */
+ /* #for J in PARTICLES_ELEC_J */
+ jq{J} = _mm256_set1_ps(charge[inr+{J}]);
+ /* #endfor */
+ /* #for J in PARTICLES_VDW_J */
+ vdwjidx{J}A = 2*vdwtype[inr+{J}];
+ /* #endfor */
+ /* #for I,J in PAIRS_IJ */
+ /* #if 'electrostatics' in INTERACTION_FLAGS[I][J] */
+ qq{I}{J} = _mm256_mul_ps(iq{I},jq{J});
+ /* #endif */
+ /* #if 'vdw' in INTERACTION_FLAGS[I][J] */
+ c6_{I}{J} = _mm256_set1_ps(vdwioffsetptr{I}[vdwjidx{J}A]);
+ c12_{I}{J} = _mm256_set1_ps(vdwioffsetptr{I}[vdwjidx{J}A+1]);
+ /* #endif */
+ /* #endfor */
+ /* #endif */
+
+ /* #if KERNEL_MOD_ELEC!='None' or KERNEL_MOD_VDW!='None' */
+ /* #if KERNEL_ELEC!='None' */
+ /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
+ rcutoff_scalar = fr->rcoulomb;
+ /* #else */
+ rcutoff_scalar = fr->rvdw;
+ /* #endif */
+ rcutoff = _mm256_set1_ps(rcutoff_scalar);
+ rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
+ /* #endif */
+
+ /* #if KERNEL_MOD_VDW=='PotentialShift' */
+ sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
+ rvdw = _mm256_set1_ps(fr->rvdw);
+ /* #endif */
+
+ /* #if 'PotentialSwitch' in [KERNEL_MOD_ELEC,KERNEL_MOD_VDW] */
+ /* #if KERNEL_MOD_ELEC=='PotentialSwitch' */
+ rswitch_scalar = fr->rcoulomb_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* #else */
+ rswitch_scalar = fr->rvdw_switch;
+ rswitch = _mm256_set1_ps(rswitch_scalar);
+ /* #endif */
+ /* Setup switch parameters */
+ d_scalar = rcutoff_scalar-rswitch_scalar;
+ d = _mm256_set1_ps(d_scalar);
+ swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
+ swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ /* #if 'Force' in KERNEL_VF */
+ swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
+ swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
+ swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
+ /* #endif */
+ /* #endif */
+
+ /* Avoid stupid compiler warnings */
+ jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
+ j_coord_offsetA = 0;
+ j_coord_offsetB = 0;
+ j_coord_offsetC = 0;
+ j_coord_offsetD = 0;
+ j_coord_offsetE = 0;
+ j_coord_offsetF = 0;
+ j_coord_offsetG = 0;
+ j_coord_offsetH = 0;
+
+ /* ## Keep track of the floating point operations we issue for reporting! */
+ /* #define OUTERFLOPS 0 */
+ outeriter = 0;
+ inneriter = 0;
+
+ for(iidx=0;iidx<4*DIM;iidx++)
+ {
+ scratch[iidx] = 0.0;
+ }
+
+ /* Start outer loop over neighborlists */
+ for(iidx=0; iidx<nri; iidx++)
+ {
+ /* Load shift vector for this list */
+ i_shift_offset = DIM*shiftidx[iidx];
+
+ /* Load limits for loop over neighbors */
+ j_index_start = jindex[iidx];
+ j_index_end = jindex[iidx+1];
+
+ /* Get outer coordinate index */
+ inr = iinr[iidx];
+ i_coord_offset = DIM*inr;
+
+ /* Load i particle coords and add shift vector */
+ /* #if GEOMETRY_I == 'Particle' */
+ gmx_mm256_load_shift_and_1rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
+ /* #elif GEOMETRY_I == 'Water3' */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
+ /* #elif GEOMETRY_I == 'Water4' */
+ /* #if 0 in PARTICLES_I */
+ gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
+ &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+ /* #else */
+ gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
+ &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
+ /* #endif */
+ /* #endif */
+
+ /* #if 'Force' in KERNEL_VF */
+ /* #for I in PARTICLES_I */
+ fix{I} = _mm256_setzero_ps();
+ fiy{I} = _mm256_setzero_ps();
+ fiz{I} = _mm256_setzero_ps();
+ /* #endfor */
+ /* #endif */
+
+ /* ## For water we already preloaded parameters at the start of the kernel */
+ /* #if not 'Water' in GEOMETRY_I */
+ /* Load parameters for i particles */
+ /* #for I in PARTICLES_ELEC_I */
+ iq{I} = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+{I}]));
+ /* #define OUTERFLOPS OUTERFLOPS+1 */
+ /* #if KERNEL_ELEC=='GeneralizedBorn' */
+ isai{I} = _mm256_set1_ps(invsqrta[inr+{I}]);
+ /* #endif */
+ /* #endfor */
+ /* #for I in PARTICLES_VDW_I */
+ vdwioffsetptr{I} = vdwparam+2*nvdwtype*vdwtype[inr+{I}];
+ /* #endfor */
+ /* #endif */
+
+ /* #if 'Potential' in KERNEL_VF */
+ /* Reset potential sums */
+ /* #if KERNEL_ELEC != 'None' */
+ velecsum = _mm256_setzero_ps();
+ /* #endif */
+ /* #if 'GeneralizedBorn' in KERNEL_ELEC */
+ vgbsum = _mm256_setzero_ps();
+ /* #endif */
+ /* #if KERNEL_VDW != 'None' */
+ vvdwsum = _mm256_setzero_ps();
+ /* #endif */
+ /* #endif */
+ /* #if 'GeneralizedBorn' in KERNEL_ELEC and 'Force' in KERNEL_VF */
+ dvdasum = _mm256_setzero_ps();
+ /* #endif */
+
+ /* #for ROUND in ['Loop','Epilogue'] */
+
+ /* #if ROUND =='Loop' */
+ /* Start inner kernel loop */
+ for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
+ {
+ /* ## First round is normal loop (next statement resets indentation) */
+ /* #if 0 */
+ }
+ /* #endif */
+ /* #else */
+ if(jidx<j_index_end)
+ {
+ /* ## Second round is epilogue */
+ /* #endif */
+ /* #define INNERFLOPS 0 */
+
+ /* Get j neighbor index, and coordinate index */
+ /* #if ROUND =='Loop' */
+ jnrA = jjnr[jidx];
+ jnrB = jjnr[jidx+1];
+ jnrC = jjnr[jidx+2];
+ jnrD = jjnr[jidx+3];
+ jnrE = jjnr[jidx+4];
+ jnrF = jjnr[jidx+5];
+ jnrG = jjnr[jidx+6];
+ jnrH = jjnr[jidx+7];
+ /* #else */
+ jnrlistA = jjnr[jidx];
+ jnrlistB = jjnr[jidx+1];
+ jnrlistC = jjnr[jidx+2];
+ jnrlistD = jjnr[jidx+3];
+ jnrlistE = jjnr[jidx+4];
+ jnrlistF = jjnr[jidx+5];
+ jnrlistG = jjnr[jidx+6];
+ jnrlistH = jjnr[jidx+7];
+ /* Sign of each element will be negative for non-real atoms.
+ * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
+ * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
+ */
+ dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
+ gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
+
+ jnrA = (jnrlistA>=0) ? jnrlistA : 0;
+ jnrB = (jnrlistB>=0) ? jnrlistB : 0;
+ jnrC = (jnrlistC>=0) ? jnrlistC : 0;
+ jnrD = (jnrlistD>=0) ? jnrlistD : 0;
+ jnrE = (jnrlistE>=0) ? jnrlistE : 0;
+ jnrF = (jnrlistF>=0) ? jnrlistF : 0;
+ jnrG = (jnrlistG>=0) ? jnrlistG : 0;
+ jnrH = (jnrlistH>=0) ? jnrlistH : 0;
+ /* #endif */
+ j_coord_offsetA = DIM*jnrA;
+ j_coord_offsetB = DIM*jnrB;
+ j_coord_offsetC = DIM*jnrC;
+ j_coord_offsetD = DIM*jnrD;
+ j_coord_offsetE = DIM*jnrE;
+ j_coord_offsetF = DIM*jnrF;
+ j_coord_offsetG = DIM*jnrG;
+ j_coord_offsetH = DIM*jnrH;
+
+ /* load j atom coordinates */
+ /* #if GEOMETRY_J == 'Particle' */
+ gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0);
+ /* #elif GEOMETRY_J == 'Water3' */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
+ /* #elif GEOMETRY_J == 'Water4' */
+ /* #if 0 in PARTICLES_J */
+ gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
+ x+j_coord_offsetC,x+j_coord_offsetD,
+ x+j_coord_offsetE,x+j_coord_offsetF,
+ x+j_coord_offsetG,x+j_coord_offsetH,
+ &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
+ &jy2,&jz2,&jx3,&jy3,&jz3);
+ /* #else */
+ gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
+ x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
+ x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
+ x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
+ &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
+ /* #endif */
+ /* #endif */
+
+ /* Calculate displacement vector */
+ /* #for I,J in PAIRS_IJ */
+ dx{I}{J} = _mm256_sub_ps(ix{I},jx{J});
+ dy{I}{J} = _mm256_sub_ps(iy{I},jy{J});
+ dz{I}{J} = _mm256_sub_ps(iz{I},jz{J});
+ /* #define INNERFLOPS INNERFLOPS+3 */
+ /* #endfor */
+
+ /* Calculate squared distance and things based on it */
+ /* #for I,J in PAIRS_IJ */
+ rsq{I}{J} = gmx_mm256_calc_rsq_ps(dx{I}{J},dy{I}{J},dz{I}{J});
+ /* #define INNERFLOPS INNERFLOPS+5 */
+ /* #endfor */
+
+ /* #for I,J in PAIRS_IJ */
+ /* #if 'rinv' in INTERACTION_FLAGS[I][J] */
+ rinv{I}{J} = gmx_mm256_invsqrt_ps(rsq{I}{J});
+ /* #define INNERFLOPS INNERFLOPS+5 */
+ /* #endif */
+ /* #endfor */
+
+ /* #for I,J in PAIRS_IJ */
+ /* #if 'rinvsq' in INTERACTION_FLAGS[I][J] */
+ /* # if 'rinv' not in INTERACTION_FLAGS[I][J] */
+ rinvsq{I}{J} = gmx_mm256_inv_ps(rsq{I}{J});
+ /* #define INNERFLOPS INNERFLOPS+4 */
+ /* #else */
+ rinvsq{I}{J} = _mm256_mul_ps(rinv{I}{J},rinv{I}{J});
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #endif */
+ /* #endif */
+ /* #endfor */
+
+ /* #if not 'Water' in GEOMETRY_J */
+ /* Load parameters for j particles */
+ /* #for J in PARTICLES_ELEC_J */
+ jq{J} = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+{J},charge+jnrB+{J},
+ charge+jnrC+{J},charge+jnrD+{J},
+ charge+jnrE+{J},charge+jnrF+{J},
+ charge+jnrG+{J},charge+jnrH+{J});
+ /* #if KERNEL_ELEC=='GeneralizedBorn' */
+ isaj{J} = gmx_mm256_load_8real_swizzle_ps(invsqrta+jnrA+{J},invsqrta+jnrB+{J},
+ invsqrta+jnrC+{J},invsqrta+jnrD+{J},
+ invsqrta+jnrE+{J},invsqrta+jnrF+{J},
+ invsqrta+jnrG+{J},invsqrta+jnrH+{J});
+ /* #endif */
+ /* #endfor */
+ /* #for J in PARTICLES_VDW_J */
+ vdwjidx{J}A = 2*vdwtype[jnrA+{J}];
+ vdwjidx{J}B = 2*vdwtype[jnrB+{J}];
+ vdwjidx{J}C = 2*vdwtype[jnrC+{J}];
+ vdwjidx{J}D = 2*vdwtype[jnrD+{J}];
+ vdwjidx{J}E = 2*vdwtype[jnrE+{J}];
+ vdwjidx{J}F = 2*vdwtype[jnrF+{J}];
+ vdwjidx{J}G = 2*vdwtype[jnrG+{J}];
+ vdwjidx{J}H = 2*vdwtype[jnrH+{J}];
+ /* #endfor */
+ /* #endif */
+
+ /* #if 'Force' in KERNEL_VF and not 'Particle' in GEOMETRY_I */
+ /* #for J in PARTICLES_J */
+ fjx{J} = _mm256_setzero_ps();
+ fjy{J} = _mm256_setzero_ps();
+ fjz{J} = _mm256_setzero_ps();
+ /* #endfor */
+ /* #endif */
+
+ /* #for I,J in PAIRS_IJ */
+
+ /**************************
+ * CALCULATE INTERACTIONS *
+ **************************/
+
+ /* ## Note special check for TIP4P-TIP4P. Since we are cutting of all hydrogen interactions we also cut the LJ-only O-O interaction */
+ /* #if 'exactcutoff' in INTERACTION_FLAGS[I][J] or (GEOMETRY_I=='Water4' and GEOMETRY_J=='Water4' and 'exactcutoff' in INTERACTION_FLAGS[1][1]) */
+ /* ## We always calculate rinv/rinvsq above to enable pipelineing in compilers (performance tested on x86) */
+ if (gmx_mm256_any_lt(rsq{I}{J},rcutoff2))
+ {
+ /* #if 0 ## this and the next two lines is a hack to maintain auto-indentation in template file */
+ }
+ /* #endif */
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #endif */
+
+ /* #if 'r' in INTERACTION_FLAGS[I][J] */
+ r{I}{J} = _mm256_mul_ps(rsq{I}{J},rinv{I}{J});
+ /* #if ROUND == 'Epilogue' */
+ r{I}{J} = _mm256_andnot_ps(dummy_mask,r{I}{J});
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #endif */
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #endif */
+
+ /* ## For water geometries we already loaded parameters at the start of the kernel */
+ /* #if not 'Water' in GEOMETRY_J */
+ /* Compute parameters for interactions between i and j atoms */
+ /* #if 'electrostatics' in INTERACTION_FLAGS[I][J] */
+ qq{I}{J} = _mm256_mul_ps(iq{I},jq{J});
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #endif */
+ /* #if 'vdw' in INTERACTION_FLAGS[I][J] */
+ gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr{I}+vdwjidx{J}A,
+ vdwioffsetptr{I}+vdwjidx{J}B,
+ vdwioffsetptr{I}+vdwjidx{J}C,
+ vdwioffsetptr{I}+vdwjidx{J}D,
+ vdwioffsetptr{I}+vdwjidx{J}E,
+ vdwioffsetptr{I}+vdwjidx{J}F,
+ vdwioffsetptr{I}+vdwjidx{J}G,
+ vdwioffsetptr{I}+vdwjidx{J}H,
+ &c6_{I}{J},&c12_{I}{J});
+ /* #endif */
+ /* #endif */
+
+ /* #if 'table' in INTERACTION_FLAGS[I][J] */
+ /* Calculate table index by multiplying r with table scale and truncate to integer */
+ rt = _mm256_mul_ps(r{I}{J},vftabscale);
+ vfitab = _mm256_cvttps_epi32(rt);
+ vfeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* #define INNERFLOPS INNERFLOPS+4 */
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ vfitab_lo = _mm256_extractf128_si256(vfitab,0x0);
+ vfitab_hi = _mm256_extractf128_si256(vfitab,0x1);
+ /* #if 'Table' in KERNEL_ELEC and 'Table' in KERNEL_VDW */
+ /* ## 3 tables, 4 bytes per point: multiply index by 12 */
+ vfitab_lo = _mm_slli_epi32(_mm_add_epi32(vfitab_lo,_mm_slli_epi32(vfitab_lo,1)),2);
+ vfitab_hi = _mm_slli_epi32(_mm_add_epi32(vfitab_hi,_mm_slli_epi32(vfitab_hi,1)),2);
+ /* #elif 'Table' in KERNEL_ELEC */
+ /* ## 1 table, 4 bytes per point: multiply index by 4 */
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,2);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,2);
+ /* #elif 'Table' in KERNEL_VDW */
+ /* ## 2 tables, 4 bytes per point: multiply index by 8 */
+ vfitab_lo = _mm_slli_epi32(vfitab_lo,3);
+ vfitab_hi = _mm_slli_epi32(vfitab_hi,3);
+ /* #endif */
+ /* #endif */
+
+ /* ## ELECTROSTATIC INTERACTIONS */
+ /* #if 'electrostatics' in INTERACTION_FLAGS[I][J] */
+
+ /* #if KERNEL_ELEC=='Coulomb' */
+
+ /* COULOMB ELECTROSTATICS */
+ velec = _mm256_mul_ps(qq{I}{J},rinv{I}{J});
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #if 'Force' in KERNEL_VF */
+ felec = _mm256_mul_ps(velec,rinvsq{I}{J});
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #endif */
+
+ /* #elif KERNEL_ELEC=='ReactionField' */
+
+ /* REACTION-FIELD ELECTROSTATICS */
+ /* #if 'Potential' in KERNEL_VF */
+ velec = _mm256_mul_ps(qq{I}{J},_mm256_sub_ps(_mm256_add_ps(rinv{I}{J},_mm256_mul_ps(krf,rsq{I}{J})),crf));
+ /* #define INNERFLOPS INNERFLOPS+4 */
+ /* #endif */
+ /* #if 'Force' in KERNEL_VF */
+ felec = _mm256_mul_ps(qq{I}{J},_mm256_sub_ps(_mm256_mul_ps(rinv{I}{J},rinvsq{I}{J}),krf2));
+ /* #define INNERFLOPS INNERFLOPS+3 */
+ /* #endif */
+
+ /* #elif KERNEL_ELEC=='GeneralizedBorn' */
+
+ /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
+ isaprod = _mm256_mul_ps(isai{I},isaj{J});
+ gbqqfactor = _mm256_xor_ps(signbit,_mm256_mul_ps(qq{I}{J},_mm256_mul_ps(isaprod,gbinvepsdiff)));
+ gbscale = _mm256_mul_ps(isaprod,gbtabscale);
+ /* #define INNERFLOPS INNERFLOPS+5 */
+
+ /* Calculate generalized born table index - this is a separate table from the normal one,
+ * but we use the same procedure by multiplying r with scale and truncating to integer.
+ */
+ rt = _mm256_mul_ps(r{I}{J},gbscale);
+ gbitab = _mm256_cvttps_epi32(rt);
+ gbeps = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
+ /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
+ gbitab_lo = _mm256_extractf128_si256(gbitab,0x0);
+ gbitab_hi = _mm256_extractf128_si256(gbitab,0x1);
+ gbitab_lo = _mm_slli_epi32(gbitab_lo,2);
+ gbitab_hi = _mm_slli_epi32(gbitab_hi,2);
+ Y = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,0)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,1)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,2)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(gbtab + _mm_extract_epi32(gbitab_hi,3)),
+ _mm_load_ps(gbtab + _mm_extract_epi32(gbitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(gbeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(gbeps,_mm256_add_ps(G,Heps)));
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(gbeps,Fp));
+ vgb = _mm256_mul_ps(gbqqfactor,VV);
+ /* #define INNERFLOPS INNERFLOPS+10 */
+
+ /* #if 'Force' in KERNEL_VF */
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(gbeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fgb = _mm256_mul_ps(gbqqfactor,_mm256_mul_ps(FF,gbscale));
+ dvdatmp = _mm256_mul_ps(minushalf,_mm256_add_ps(vgb,_mm256_mul_ps(fgb,r{I}{J})));
+ dvdasum = _mm256_add_ps(dvdasum,dvdatmp);
+ /* #if ROUND == 'Loop' */
+ fjptrA = dvda+jnrA;
+ fjptrB = dvda+jnrB;
+ fjptrC = dvda+jnrC;
+ fjptrD = dvda+jnrD;
+ fjptrE = dvda+jnrE;
+ fjptrF = dvda+jnrF;
+ fjptrG = dvda+jnrG;
+ fjptrH = dvda+jnrH;
+ /* #else */
+ /* The pointers to scratch make sure that this code with compilers that take gmx_restrict seriously (e.g. icc 13) really can't screw things up. */
+ fjptrA = (jnrlistA>=0) ? dvda+jnrA : scratch;
+ fjptrB = (jnrlistB>=0) ? dvda+jnrB : scratch;
+ fjptrC = (jnrlistC>=0) ? dvda+jnrC : scratch;
+ fjptrD = (jnrlistD>=0) ? dvda+jnrD : scratch;
+ fjptrE = (jnrlistE>=0) ? dvda+jnrE : scratch;
+ fjptrF = (jnrlistF>=0) ? dvda+jnrF : scratch;
+ fjptrG = (jnrlistG>=0) ? dvda+jnrG : scratch;
+ fjptrH = (jnrlistH>=0) ? dvda+jnrH : scratch;
+ /* #endif */
+ gmx_mm256_increment_8real_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ _mm256_mul_ps(dvdatmp,_mm256_mul_ps(isaj{J},isaj{J})));
+ /* #define INNERFLOPS INNERFLOPS+12 */
+ /* #endif */
+ velec = _mm256_mul_ps(qq{I}{J},rinv{I}{J});
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #if 'Force' in KERNEL_VF */
+ felec = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(velec,rinv{I}{J}),fgb),rinv{I}{J});
+ /* #define INNERFLOPS INNERFLOPS+3 */
+ /* #endif */
+
+ /* #elif KERNEL_ELEC=='Ewald' */
+ /* EWALD ELECTROSTATICS */
+
+ /* Analytical PME correction */
+ zeta2 = _mm256_mul_ps(beta2,rsq{I}{J});
+ /* #if 'Force' in KERNEL_VF */
+ rinv3 = _mm256_mul_ps(rinvsq{I}{J},rinv{I}{J});
+ pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
+ felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
+ felec = _mm256_mul_ps(qq{I}{J},felec);
+ /* #define INNERFLOPS INNERFLOPS+31 */
+ /* #endif */
+ /* #if 'Potential' in KERNEL_VF or KERNEL_MOD_ELEC=='PotentialSwitch' */
+ pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
+ pmecorrV = _mm256_mul_ps(pmecorrV,beta);
+ /* #define INNERFLOPS INNERFLOPS+27 */
+ /* #if KERNEL_MOD_ELEC=='PotentialShift' */
+ velec = _mm256_sub_ps(_mm256_sub_ps(rinv{I}{J},sh_ewald),pmecorrV);
+ /* #define INNERFLOPS INNERFLOPS+21 */
+ /* #else */
+ velec = _mm256_sub_ps(rinv{I}{J},pmecorrV);
+ /* #endif */
+ velec = _mm256_mul_ps(qq{I}{J},velec);
+ /* #endif */
+
+ /* #elif KERNEL_ELEC=='CubicSplineTable' */
+
+ /* CUBIC SPLINE TABLE ELECTROSTATICS */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ /* #define INNERFLOPS INNERFLOPS+4 */
+ /* #if 'Potential' in KERNEL_VF */
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ velec = _mm256_mul_ps(qq{I}{J},VV);
+ /* #define INNERFLOPS INNERFLOPS+3 */
+ /* #endif */
+ /* #if 'Force' in KERNEL_VF */
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ felec = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_mul_ps(qq{I}{J},FF),_mm256_mul_ps(vftabscale,rinv{I}{J})));
+ /* #define INNERFLOPS INNERFLOPS+7 */
+ /* #endif */
+ /* #endif */
+ /* ## End of check for electrostatics interaction forms */
+ /* #endif */
+ /* ## END OF ELECTROSTATIC INTERACTION CHECK FOR PAIR I-J */
+
+ /* #if 'vdw' in INTERACTION_FLAGS[I][J] */
+
+ /* #if KERNEL_VDW=='LennardJones' */
+
+ /* LENNARD-JONES DISPERSION/REPULSION */
+
+ rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq{I}{J},rinvsq{I}{J}),rinvsq{I}{J});
+ /* #define INNERFLOPS INNERFLOPS+2 */
+ /* #if 'Potential' in KERNEL_VF or KERNEL_MOD_VDW=='PotentialSwitch' */
+ vvdw6 = _mm256_mul_ps(c6_{I}{J},rinvsix);
+ vvdw12 = _mm256_mul_ps(c12_{I}{J},_mm256_mul_ps(rinvsix,rinvsix));
+ /* #define INNERFLOPS INNERFLOPS+3 */
+ /* #if KERNEL_MOD_VDW=='PotentialShift' */
+ vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_{I}{J},_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
+ _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_{I}{J},sh_vdw_invrcut6)),one_sixth));
+ /* #define INNERFLOPS INNERFLOPS+8 */
+ /* #else */
+ vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
+ /* #define INNERFLOPS INNERFLOPS+3 */
+ /* #endif */
+ /* ## Check for force inside potential check, i.e. this means we already did the potential part */
+ /* #if 'Force' in KERNEL_VF */
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq{I}{J});
+ /* #define INNERFLOPS INNERFLOPS+2 */
+ /* #endif */
+ /* #elif KERNEL_VF=='Force' */
+ /* ## Force-only LennardJones makes it possible to save 1 flop (they do add up...) */
+ fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_{I}{J},rinvsix),c6_{I}{J}),_mm256_mul_ps(rinvsix,rinvsq{I}{J}));
+ /* #define INNERFLOPS INNERFLOPS+4 */
+ /* #endif */
+
+ /* #elif KERNEL_VDW=='CubicSplineTable' */
+
+ /* CUBIC SPLINE TABLE DISPERSION */
+ /* #if 'Table' in KERNEL_ELEC */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ /* #endif */
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ /* #define INNERFLOPS INNERFLOPS+4 */
+ /* #if 'Potential' in KERNEL_VF */
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw6 = _mm256_mul_ps(c6_{I}{J},VV);
+ /* #define INNERFLOPS INNERFLOPS+3 */
+ /* #endif */
+ /* #if 'Force' in KERNEL_VF */
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw6 = _mm256_mul_ps(c6_{I}{J},FF);
+ /* #define INNERFLOPS INNERFLOPS+4 */
+ /* #endif */
+
+ /* CUBIC SPLINE TABLE REPULSION */
+ vfitab_lo = _mm_add_epi32(vfitab_lo,ifour);
+ vfitab_hi = _mm_add_epi32(vfitab_hi,ifour);
+ Y = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
+ F = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
+ G = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
+ H = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
+ _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
+ GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
+ Heps = _mm256_mul_ps(vfeps,H);
+ Fp = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
+ /* #define INNERFLOPS INNERFLOPS+4 */
+ /* #if 'Potential' in KERNEL_VF */
+ VV = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
+ vvdw12 = _mm256_mul_ps(c12_{I}{J},VV);
+ /* #define INNERFLOPS INNERFLOPS+3 */
+ /* #endif */
+ /* #if 'Force' in KERNEL_VF */
+ FF = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
+ fvdw12 = _mm256_mul_ps(c12_{I}{J},FF);
+ /* #define INNERFLOPS INNERFLOPS+5 */
+ /* #endif */
+ /* #if 'Potential' in KERNEL_VF */
+ vvdw = _mm256_add_ps(vvdw12,vvdw6);
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #endif */
+ /* #if 'Force' in KERNEL_VF */
+ fvdw = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv{I}{J})));
+ /* #define INNERFLOPS INNERFLOPS+4 */
+ /* #endif */
+ /* #endif */
+ /* ## End of check for vdw interaction forms */
+ /* #endif */
+ /* ## END OF VDW INTERACTION CHECK FOR PAIR I-J */
+
+ /* #if 'switch' in INTERACTION_FLAGS[I][J] */
+ d = _mm256_sub_ps(r{I}{J},rswitch);
+ d = _mm256_max_ps(d,_mm256_setzero_ps());
+ d2 = _mm256_mul_ps(d,d);
+ sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
+ /* #define INNERFLOPS INNERFLOPS+10 */
+
+ /* #if 'Force' in KERNEL_VF */
+ dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
+ /* #define INNERFLOPS INNERFLOPS+5 */
+ /* #endif */
+
+ /* Evaluate switch function */
+ /* #if 'Force' in KERNEL_VF */
+ /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
+ /* #if 'electrostatics' in INTERACTION_FLAGS[I][J] and KERNEL_MOD_ELEC=='PotentialSwitch' */
+ felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv{I}{J},_mm256_mul_ps(velec,dsw)) );
+ /* #define INNERFLOPS INNERFLOPS+4 */
+ /* #endif */
+ /* #if 'vdw' in INTERACTION_FLAGS[I][J] and KERNEL_MOD_VDW=='PotentialSwitch' */
+ fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv{I}{J},_mm256_mul_ps(vvdw,dsw)) );
+ /* #define INNERFLOPS INNERFLOPS+4 */
+ /* #endif */
+ /* #endif */
+ /* #if 'Potential' in KERNEL_VF */
+ /* #if 'electrostatics' in INTERACTION_FLAGS[I][J] and KERNEL_MOD_ELEC=='PotentialSwitch' */
+ velec = _mm256_mul_ps(velec,sw);
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #endif */
+ /* #if 'vdw' in INTERACTION_FLAGS[I][J] and KERNEL_MOD_VDW=='PotentialSwitch' */
+ vvdw = _mm256_mul_ps(vvdw,sw);
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #endif */
+ /* #endif */
+ /* #endif */
+ /* ## Note special check for TIP4P-TIP4P. Since we are cutting of all hydrogen interactions we also cut the LJ-only O-O interaction */
+ /* #if 'exactcutoff' in INTERACTION_FLAGS[I][J] or (GEOMETRY_I=='Water4' and GEOMETRY_J=='Water4' and 'exactcutoff' in INTERACTION_FLAGS[1][1]) */
+ cutoff_mask = _mm256_cmp_ps(rsq{I}{J},rcutoff2,_CMP_LT_OQ);
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #endif */
+
+ /* #if 'Potential' in KERNEL_VF */
+ /* Update potential sum for this i atom from the interaction with this j atom. */
+ /* #if 'electrostatics' in INTERACTION_FLAGS[I][J] */
+ /* #if 'exactcutoff' in INTERACTION_FLAGS[I][J] */
+ velec = _mm256_and_ps(velec,cutoff_mask);
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #endif */
+ /* #if ROUND == 'Epilogue' */
+ velec = _mm256_andnot_ps(dummy_mask,velec);
+ /* #endif */
+ velecsum = _mm256_add_ps(velecsum,velec);
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #if KERNEL_ELEC=='GeneralizedBorn' */
+ /* #if 'exactcutoff' in INTERACTION_FLAGS[I][J] */
+ vgb = _mm256_and_ps(vgb,cutoff_mask);
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #endif */
+ /* #if ROUND == 'Epilogue' */
+ vgb = _mm256_andnot_ps(dummy_mask,vgb);
+ /* #endif */
+ vgbsum = _mm256_add_ps(vgbsum,vgb);
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #endif */
+ /* #endif */
+ /* #if 'vdw' in INTERACTION_FLAGS[I][J] */
+ /* ## Note special check for TIP4P-TIP4P. Since we are cutting of all hydrogen interactions we also cut the LJ-only O-O interaction */
+ /* #if 'exactcutoff' in INTERACTION_FLAGS[I][J] or (GEOMETRY_I=='Water4' and GEOMETRY_J=='Water4' and 'exactcutoff' in INTERACTION_FLAGS[1][1]) */
+ vvdw = _mm256_and_ps(vvdw,cutoff_mask);
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #endif */
+ /* #if ROUND == 'Epilogue' */
+ vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
+ /* #endif */
+ vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #endif */
+ /* #endif */
+
+ /* #if 'Force' in KERNEL_VF */
+
+ /* #if 'electrostatics' in INTERACTION_FLAGS[I][J] and 'vdw' in INTERACTION_FLAGS[I][J] */
+ fscal = _mm256_add_ps(felec,fvdw);
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #elif 'electrostatics' in INTERACTION_FLAGS[I][J] */
+ fscal = felec;
+ /* #elif 'vdw' in INTERACTION_FLAGS[I][J] */
+ fscal = fvdw;
+ /* #endif */
+
+ /* ## Note special check for TIP4P-TIP4P. Since we are cutting of all hydrogen interactions we also cut the LJ-only O-O interaction */
+ /* #if 'exactcutoff' in INTERACTION_FLAGS[I][J] or (GEOMETRY_I=='Water4' and GEOMETRY_J=='Water4' and 'exactcutoff' in INTERACTION_FLAGS[1][1]) */
+ fscal = _mm256_and_ps(fscal,cutoff_mask);
+ /* #define INNERFLOPS INNERFLOPS+1 */
+ /* #endif */
+
+ /* #if ROUND == 'Epilogue' */
+ fscal = _mm256_andnot_ps(dummy_mask,fscal);
+ /* #endif */
+
+ /* Calculate temporary vectorial force */
+ tx = _mm256_mul_ps(fscal,dx{I}{J});
+ ty = _mm256_mul_ps(fscal,dy{I}{J});
+ tz = _mm256_mul_ps(fscal,dz{I}{J});
+
+ /* Update vectorial force */
+ fix{I} = _mm256_add_ps(fix{I},tx);
+ fiy{I} = _mm256_add_ps(fiy{I},ty);
+ fiz{I} = _mm256_add_ps(fiz{I},tz);
+ /* #define INNERFLOPS INNERFLOPS+6 */
+
+ /* #if GEOMETRY_I == 'Particle' */
+ /* #if ROUND == 'Loop' */
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ /* #else */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ /* #endif */
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,tx,ty,tz);
+ /* #define INNERFLOPS INNERFLOPS+3 */
+ /* #else */
+ fjx{J} = _mm256_add_ps(fjx{J},tx);
+ fjy{J} = _mm256_add_ps(fjy{J},ty);
+ fjz{J} = _mm256_add_ps(fjz{J},tz);
+ /* #define INNERFLOPS INNERFLOPS+3 */
+ /* #endif */
+
+ /* #endif */
+
+ /* ## Note special check for TIP4P-TIP4P. Since we are cutting of all hydrogen interactions we also cut the LJ-only O-O interaction */
+ /* #if 'exactcutoff' in INTERACTION_FLAGS[I][J] or (GEOMETRY_I=='Water4' and GEOMETRY_J=='Water4' and 'exactcutoff' in INTERACTION_FLAGS[1][1]) */
+ /* #if 0 ## This and next two lines is a hack to maintain indentation in template file */
+ {
+ /* #endif */
+ }
+ /* #endif */
+ /* ## End of check for the interaction being outside the cutoff */
+
+ /* #endfor */
+ /* ## End of loop over i-j interaction pairs */
+
+ /* #if GEOMETRY_I != 'Particle' */
+ /* #if ROUND == 'Loop' */
+ fjptrA = f+j_coord_offsetA;
+ fjptrB = f+j_coord_offsetB;
+ fjptrC = f+j_coord_offsetC;
+ fjptrD = f+j_coord_offsetD;
+ fjptrE = f+j_coord_offsetE;
+ fjptrF = f+j_coord_offsetF;
+ fjptrG = f+j_coord_offsetG;
+ fjptrH = f+j_coord_offsetH;
+ /* #else */
+ fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
+ fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
+ fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
+ fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
+ fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
+ fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
+ fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
+ fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
+ /* #endif */
+ /* #endif */
+
+ /* #if 'Water' in GEOMETRY_I and GEOMETRY_J == 'Particle' */
+ gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
+ /* #define INNERFLOPS INNERFLOPS+3 */
+ /* #elif GEOMETRY_J == 'Water3' */
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
+ /* #define INNERFLOPS INNERFLOPS+9 */
+ /* #elif GEOMETRY_J == 'Water4' */
+ /* #if 0 in PARTICLES_J */
+ gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
+ fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
+ fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+ /* #define INNERFLOPS INNERFLOPS+12 */
+ /* #else */
+ gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
+ fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
+ fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
+ /* #define INNERFLOPS INNERFLOPS+9 */
+ /* #endif */
+ /* #endif */
+
+ /* Inner loop uses {INNERFLOPS} flops */
+ }
+
+ /* #endfor */
+
+ /* End of innermost loop */
+
+ /* #if 'Force' in KERNEL_VF */
+ /* #if GEOMETRY_I == 'Particle' */
+ gmx_mm256_update_iforce_1atom_swizzle_ps(fix0,fiy0,fiz0,
+ f+i_coord_offset,fshift+i_shift_offset);
+ /* #define OUTERFLOPS OUTERFLOPS+6 */
+ /* #elif GEOMETRY_I == 'Water3' */
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
+ f+i_coord_offset,fshift+i_shift_offset);
+ /* #define OUTERFLOPS OUTERFLOPS+18 */
+ /* #elif GEOMETRY_I == 'Water4' */
+ /* #if 0 in PARTICLES_I */
+ gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset,fshift+i_shift_offset);
+ /* #define OUTERFLOPS OUTERFLOPS+24 */
+ /* #else */
+ gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
+ f+i_coord_offset+DIM,fshift+i_shift_offset);
+ /* #define OUTERFLOPS OUTERFLOPS+18 */
+ /* #endif */
+ /* #endif */
+ /* #endif */
+
+ /* #if 'Potential' in KERNEL_VF */
+ ggid = gid[iidx];
+ /* Update potential energies */
+ /* #if KERNEL_ELEC != 'None' */
+ gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
+ /* #define OUTERFLOPS OUTERFLOPS+1 */
+ /* #endif */
+ /* #if 'GeneralizedBorn' in KERNEL_ELEC */
+ gmx_mm256_update_1pot_ps(vgbsum,kernel_data->energygrp_polarization+ggid);
+ /* #define OUTERFLOPS OUTERFLOPS+1 */
+ /* #endif */
+ /* #if KERNEL_VDW != 'None' */
+ gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
+ /* #define OUTERFLOPS OUTERFLOPS+1 */
+ /* #endif */
+ /* #endif */
+ /* #if 'GeneralizedBorn' in KERNEL_ELEC and 'Force' in KERNEL_VF */
+ dvdasum = _mm256_mul_ps(dvdasum, _mm256_mul_ps(isai{I},isai{I}));
+ gmx_mm256_update_1pot_ps(dvdasum,dvda+inr);
+ /* #endif */
+
+ /* Increment number of inner iterations */
+ inneriter += j_index_end - j_index_start;
+
+ /* Outer loop uses {OUTERFLOPS} flops */
+ }
+
+ /* Increment number of outer iterations */
+ outeriter += nri;
+
+ /* Update outer/inner flops */
+ /* ## NB: This is not important, it just affects the flopcount. However, since our preprocessor is */
+ /* ## primitive and replaces aggressively even in strings inside these directives, we need to */
+ /* ## assemble the main part of the name (containing KERNEL/ELEC/VDW) directly in the source. */
+ /* #if GEOMETRY_I == 'Water3' */
+ /* #define ISUFFIX '_W3' */
+ /* #elif GEOMETRY_I == 'Water4' */
+ /* #define ISUFFIX '_W4' */
+ /* #else */
+ /* #define ISUFFIX '' */
+ /* #endif */
+ /* #if GEOMETRY_J == 'Water3' */
+ /* #define JSUFFIX 'W3' */
+ /* #elif GEOMETRY_J == 'Water4' */
+ /* #define JSUFFIX 'W4' */
+ /* #else */
+ /* #define JSUFFIX '' */
+ /* #endif */
+ /* #if 'PotentialAndForce' in KERNEL_VF */
+ /* #define VFSUFFIX '_VF' */
+ /* #elif 'Potential' in KERNEL_VF */
+ /* #define VFSUFFIX '_V' */
+ /* #else */
+ /* #define VFSUFFIX '_F' */
+ /* #endif */
+
+ /* #if KERNEL_ELEC != 'None' and KERNEL_VDW != 'None' */
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW{ISUFFIX}{JSUFFIX}{VFSUFFIX},outeriter*{OUTERFLOPS} + inneriter*{INNERFLOPS});
+ /* #elif KERNEL_ELEC != 'None' */
+ inc_nrnb(nrnb,eNR_NBKERNEL_ELEC{ISUFFIX}{JSUFFIX}{VFSUFFIX},outeriter*{OUTERFLOPS} + inneriter*{INNERFLOPS});
+ /* #else */
+ inc_nrnb(nrnb,eNR_NBKERNEL_VDW{ISUFFIX}{JSUFFIX}{VFSUFFIX},outeriter*{OUTERFLOPS} + inneriter*{INNERFLOPS});
+ /* #endif */
+}
/* Different default (c) and accelerated interaction-specific kernels */
#include "nb_kernel_c/nb_kernel_c.h"
-/* Temporary enabler until we add the AVX_256 kernels */
-#if (defined GMX_CPU_ACCELERATION_X86_AVX_256)
-# define GMX_CPU_ACCELERATION_X86_SSE4_1
-#endif
-
#if (defined GMX_CPU_ACCELERATION_X86_SSE2) && !(defined GMX_DOUBLE)
# include "nb_kernel_sse2_single/nb_kernel_sse2_single.h"
#endif
#if (defined GMX_CPU_ACCELERATION_X86_AVX_128_FMA) && !(defined GMX_DOUBLE)
# include "nb_kernel_avx_128_fma_single/nb_kernel_avx_128_fma_single.h"
#endif
+#if (defined GMX_CPU_ACCELERATION_X86_AVX_256) && !(defined GMX_DOUBLE)
+# include "nb_kernel_avx_256_single/nb_kernel_avx_256_single.h"
+#endif
#ifdef GMX_THREAD_MPI
#endif
#if (defined GMX_CPU_ACCELERATION_X86_AVX_128_FMA) && !(defined GMX_DOUBLE)
nb_kernel_list_add_kernels(kernellist_avx_128_fma_single,kernellist_avx_128_fma_single_size);
+#endif
+#if (defined GMX_CPU_ACCELERATION_X86_AVX_256) && !(defined GMX_DOUBLE)
+ nb_kernel_list_add_kernels(kernellist_avx_256_single,kernellist_avx_256_single_size);
#endif
; /* empty statement to avoid a completely empty block */
}
}
arch_and_padding[] =
{
+#if (defined GMX_CPU_ACCELERATION_X86_AVX_256) && !(defined GMX_DOUBLE)
+ { "avx_256_single", 8 },
+#endif
#if (defined GMX_CPU_ACCELERATION_X86_AVX_128_FMA) && !(defined GMX_DOUBLE)
{ "avx_128_fma_single", 4 },
#endif
+++ /dev/null
-Makefile
-Makefile.in
-.deps
-.libs
+++ /dev/null
-Makefile
-Makefile.in
-.libs
-.deps
+++ /dev/null
-Makefile
-Makefile.in
-.deps
-.libs
\ No newline at end of file
+++ /dev/null
-Makefile
-Makefile.in
-.deps
-.libs
\ No newline at end of file
+++ /dev/null
-Makefile
-Makefile.in
-.deps
-.libs
\ No newline at end of file
+++ /dev/null
-Makefile
-Makefile.in
-.deps
-.libs
\ No newline at end of file