2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS sse4_1_double kernel generator.
42 #include "../nb_kernel.h"
43 #include "types/simple.h"
44 #include "gromacs/math/vec.h"
47 #include "gromacs/simd/math_x86_sse4_1_double.h"
48 #include "kernelutil_x86_sse4_1_double.h"
51 * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJEwSh_GeomP1P1_VF_sse4_1_double
52 * Electrostatics interaction: Ewald
53 * VdW interaction: LJEwald
54 * Geometry: Particle-Particle
55 * Calculate force/pot: PotentialAndForce
/*
 * Potential-and-force (VF) kernel: potential-shifted Ewald electrostatics
 * combined with analytical LJ-PME (LJEwald, also potential-shifted), for
 * particle-particle geometry, vectorized 2-wide over j atoms using
 * SSE4.1 double precision.
 *
 * nlist       : neighbor lists (i atoms, j index ranges, shift indices)
 * xx / ff     : coordinate and force arrays (accessed via flat real*)
 * fr          : force record; supplies Ewald/LJ-PME constants, tables, cutoffs
 * mdatoms     : per-atom charges and VdW types
 * kernel_data : receives per-energy-group electrostatic and VdW energy sums
 * nrnb        : flop accounting
 */
58 nb_kernel_ElecEwSh_VdwLJEwSh_GeomP1P1_VF_sse4_1_double
59 (t_nblist * gmx_restrict nlist,
60 rvec * gmx_restrict xx,
61 rvec * gmx_restrict ff,
62 t_forcerec * gmx_restrict fr,
63 t_mdatoms * gmx_restrict mdatoms,
64 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
65 t_nrnb * gmx_restrict nrnb)
67 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
68 * just 0 for non-waters.
69 * Suffixes A,B refer to j loop unrolling done with SSE double precision, e.g. for the two different
70 * jnr indices corresponding to data put in the four positions in the SIMD register.
72 int i_shift_offset,i_coord_offset,outeriter,inneriter;
73 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
75 int j_coord_offsetA,j_coord_offsetB;
76 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
78 real *shiftvec,*fshift,*x,*f;
79 __m128d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
81 __m128d ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
82 int vdwjidx0A,vdwjidx0B;
83 __m128d jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
84 __m128d dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
85 __m128d velec,felec,velecsum,facel,crf,krf,krf2;
88 __m128d rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
91 __m128d one_sixth = _mm_set1_pd(1.0/6.0);
92 __m128d one_twelfth = _mm_set1_pd(1.0/12.0);
94 __m128d ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
96 __m128d one_half = _mm_set1_pd(0.5);
97 __m128d minus_one = _mm_set1_pd(-1.0);
99 __m128d ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
101 __m128d dummy_mask,cutoff_mask;
102 __m128d signbit = gmx_mm_castsi128_pd( _mm_set_epi32(0x80000000,0x00000000,0x80000000,0x00000000) );
103 __m128d one = _mm_set1_pd(1.0);
104 __m128d two = _mm_set1_pd(2.0);
110 jindex = nlist->jindex;
112 shiftidx = nlist->shift;
114 shiftvec = fr->shift_vec[0];
115 fshift = fr->fshift[0];
116 facel = _mm_set1_pd(fr->epsfac);
117 charge = mdatoms->chargeA;
118 nvdwtype = fr->ntype;
120 vdwtype = mdatoms->typeA;
121 vdwgridparam = fr->ljpme_c6grid;
122 sh_lj_ewald = _mm_set1_pd(fr->ic->sh_lj_ewald);
123 ewclj = _mm_set1_pd(fr->ewaldcoeff_lj);
/* ewclj2 = -beta^2; the negative sign makes exp(ewclj2*r^2) = exp(-(beta r)^2) below */
124 ewclj2 = _mm_mul_pd(minus_one,_mm_mul_pd(ewclj,ewclj));
126 sh_ewald = _mm_set1_pd(fr->ic->sh_ewald);
/* VF kernel uses the combined F,D,V,0 quadruplet table for Coulomb */
127 ewtab = fr->ic->tabq_coul_FDV0;
128 ewtabscale = _mm_set1_pd(fr->ic->tabq_scale);
129 ewtabhalfspace = _mm_set1_pd(0.5/fr->ic->tabq_scale);
131 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
132 rcutoff_scalar = fr->rcoulomb;
133 rcutoff = _mm_set1_pd(rcutoff_scalar);
134 rcutoff2 = _mm_mul_pd(rcutoff,rcutoff);
136 sh_vdw_invrcut6 = _mm_set1_pd(fr->ic->sh_invrc6);
137 rvdw = _mm_set1_pd(fr->rvdw);
139 /* Avoid stupid compiler warnings */
147 /* Start outer loop over neighborlists */
148 for(iidx=0; iidx<nri; iidx++)
150 /* Load shift vector for this list */
151 i_shift_offset = DIM*shiftidx[iidx];
153 /* Load limits for loop over neighbors */
154 j_index_start = jindex[iidx];
155 j_index_end = jindex[iidx+1];
157 /* Get outer coordinate index */
159 i_coord_offset = DIM*inr;
161 /* Load i particle coords and add shift vector */
162 gmx_mm_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
164 fix0 = _mm_setzero_pd();
165 fiy0 = _mm_setzero_pd();
166 fiz0 = _mm_setzero_pd();
168 /* Load parameters for i particles */
169 iq0 = _mm_mul_pd(facel,_mm_load1_pd(charge+inr+0));
170 vdwioffset0 = 2*nvdwtype*vdwtype[inr+0];
172 /* Reset potential sums */
173 velecsum = _mm_setzero_pd();
174 vvdwsum = _mm_setzero_pd();
176 /* Start inner kernel loop */
/* 2-way unrolled: SIMD lanes A and B hold two consecutive j atoms per iteration */
177 for(jidx=j_index_start; jidx<j_index_end-1; jidx+=2)
180 /* Get j neighbor index, and coordinate index */
183 j_coord_offsetA = DIM*jnrA;
184 j_coord_offsetB = DIM*jnrB;
186 /* load j atom coordinates */
187 gmx_mm_load_1rvec_2ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
190 /* Calculate displacement vector */
191 dx00 = _mm_sub_pd(ix0,jx0);
192 dy00 = _mm_sub_pd(iy0,jy0);
193 dz00 = _mm_sub_pd(iz0,jz0);
195 /* Calculate squared distance and things based on it */
196 rsq00 = gmx_mm_calc_rsq_pd(dx00,dy00,dz00);
198 rinv00 = gmx_mm_invsqrt_pd(rsq00);
200 rinvsq00 = _mm_mul_pd(rinv00,rinv00);
202 /* Load parameters for j particles */
203 jq0 = gmx_mm_load_2real_swizzle_pd(charge+jnrA+0,charge+jnrB+0);
204 vdwjidx0A = 2*vdwtype[jnrA+0];
205 vdwjidx0B = 2*vdwtype[jnrB+0];
207 /**************************
208 * CALCULATE INTERACTIONS *
209 **************************/
/* Skip the whole pair block only if BOTH lanes are outside the cutoff */
211 if (gmx_mm_any_lt(rsq00,rcutoff2))
214 r00 = _mm_mul_pd(rsq00,rinv00);
216 /* Compute parameters for interactions between i and j atoms */
217 qq00 = _mm_mul_pd(iq0,jq0);
218 gmx_mm_load_2pair_swizzle_pd(vdwparam+vdwioffset0+vdwjidx0A,
219 vdwparam+vdwioffset0+vdwjidx0B,&c6_00,&c12_00);
220 c6grid_00 = gmx_mm_load_2real_swizzle_pd(vdwgridparam+vdwioffset0+vdwjidx0A,
221 vdwgridparam+vdwioffset0+vdwjidx0B);
223 /* EWALD ELECTROSTATICS */
225 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
226 ewrt = _mm_mul_pd(r00,ewtabscale);
227 ewitab = _mm_cvttpd_epi32(ewrt);
228 eweps = _mm_sub_pd(ewrt,_mm_round_pd(ewrt, _MM_FROUND_FLOOR));
/* Each table entry is a quadruplet of doubles (F,D,V,0 — cf. tabq_coul_FDV0), so index*4 */
229 ewitab = _mm_slli_epi32(ewitab,2);
230 ewtabF = _mm_load_pd( ewtab + gmx_mm_extract_epi32(ewitab,0) );
231 ewtabD = _mm_load_pd( ewtab + gmx_mm_extract_epi32(ewitab,1) );
232 GMX_MM_TRANSPOSE2_PD(ewtabF,ewtabD);
233 ewtabV = _mm_load_sd( ewtab + gmx_mm_extract_epi32(ewitab,0) +2);
234 ewtabFn = _mm_load_sd( ewtab + gmx_mm_extract_epi32(ewitab,1) +2);
235 GMX_MM_TRANSPOSE2_PD(ewtabV,ewtabFn);
/* F = F_i + eps*D_i; V corrected by the half-spacing term; subtracting sh_ewald
 * shifts the electrostatic potential to zero at the cutoff */
236 felec = _mm_add_pd(ewtabF,_mm_mul_pd(eweps,ewtabD));
237 velec = _mm_sub_pd(ewtabV,_mm_mul_pd(_mm_mul_pd(ewtabhalfspace,eweps),_mm_add_pd(ewtabF,felec)));
238 velec = _mm_mul_pd(qq00,_mm_sub_pd(_mm_sub_pd(rinv00,sh_ewald),velec));
239 felec = _mm_mul_pd(_mm_mul_pd(qq00,rinv00),_mm_sub_pd(rinvsq00,felec));
241 /* Analytical LJ-PME */
242 rinvsix = _mm_mul_pd(_mm_mul_pd(rinvsq00,rinvsq00),rinvsq00);
/* ewcljrsq = -(beta*r)^2 since ewclj2 carries the minus sign */
243 ewcljrsq = _mm_mul_pd(ewclj2,rsq00);
244 ewclj6 = _mm_mul_pd(ewclj2,_mm_mul_pd(ewclj2,ewclj2));
245 exponent = gmx_simd_exp_d(ewcljrsq);
246 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
247 poly = _mm_mul_pd(exponent,_mm_add_pd(_mm_sub_pd(one,ewcljrsq),_mm_mul_pd(_mm_mul_pd(ewcljrsq,ewcljrsq),one_half)));
248 /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
249 vvdw6 = _mm_mul_pd(_mm_sub_pd(c6_00,_mm_mul_pd(c6grid_00,_mm_sub_pd(one,poly))),rinvsix);
250 vvdw12 = _mm_mul_pd(c12_00,_mm_mul_pd(rinvsix,rinvsix));
/* Potential-shifted LJ: subtract the value of each term at the cutoff
 * (sh_vdw_invrcut6 and sh_lj_ewald terms) so vvdw -> 0 at r = rcut */
251 vvdw = _mm_sub_pd(_mm_mul_pd( _mm_sub_pd(vvdw12 , _mm_mul_pd(c12_00,_mm_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))),one_twelfth),
252 _mm_mul_pd( _mm_sub_pd(vvdw6,_mm_add_pd(_mm_mul_pd(c6_00,sh_vdw_invrcut6),_mm_mul_pd(c6grid_00,sh_lj_ewald))),one_sixth));
253 /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
254 fvdw = _mm_mul_pd(_mm_sub_pd(vvdw12,_mm_sub_pd(vvdw6,_mm_mul_pd(_mm_mul_pd(c6grid_00,one_sixth),_mm_mul_pd(exponent,ewclj6)))),rinvsq00);
/* Per-lane mask: true where rsq < rcutoff^2; lanes beyond the cutoff contribute nothing */
256 cutoff_mask = _mm_cmplt_pd(rsq00,rcutoff2);
258 /* Update potential sum for this i atom from the interaction with this j atom. */
259 velec = _mm_and_pd(velec,cutoff_mask);
260 velecsum = _mm_add_pd(velecsum,velec);
261 vvdw = _mm_and_pd(vvdw,cutoff_mask);
262 vvdwsum = _mm_add_pd(vvdwsum,vvdw);
264 fscal = _mm_add_pd(felec,fvdw);
266 fscal = _mm_and_pd(fscal,cutoff_mask);
268 /* Calculate temporary vectorial force */
269 tx = _mm_mul_pd(fscal,dx00);
270 ty = _mm_mul_pd(fscal,dy00);
271 tz = _mm_mul_pd(fscal,dz00);
273 /* Update vectorial force */
274 fix0 = _mm_add_pd(fix0,tx);
275 fiy0 = _mm_add_pd(fiy0,ty);
276 fiz0 = _mm_add_pd(fiz0,tz);
/* Newton's third law: subtract the same force from both j atoms */
278 gmx_mm_decrement_1rvec_2ptr_swizzle_pd(f+j_coord_offsetA,f+j_coord_offsetB,tx,ty,tz);
282 /* Inner loop uses 81 flops */
/* Epilogue: when the neighbor count is odd, process the single remaining
 * j atom; only SIMD lane 0 carries valid data, the upper lane must be
 * zeroed before any accumulation. */
289 j_coord_offsetA = DIM*jnrA;
291 /* load j atom coordinates */
292 gmx_mm_load_1rvec_1ptr_swizzle_pd(x+j_coord_offsetA,
295 /* Calculate displacement vector */
296 dx00 = _mm_sub_pd(ix0,jx0);
297 dy00 = _mm_sub_pd(iy0,jy0);
298 dz00 = _mm_sub_pd(iz0,jz0);
300 /* Calculate squared distance and things based on it */
301 rsq00 = gmx_mm_calc_rsq_pd(dx00,dy00,dz00);
303 rinv00 = gmx_mm_invsqrt_pd(rsq00);
305 rinvsq00 = _mm_mul_pd(rinv00,rinv00);
307 /* Load parameters for j particles */
308 jq0 = _mm_load_sd(charge+jnrA+0);
309 vdwjidx0A = 2*vdwtype[jnrA+0];
311 /**************************
312 * CALCULATE INTERACTIONS *
313 **************************/
315 if (gmx_mm_any_lt(rsq00,rcutoff2))
318 r00 = _mm_mul_pd(rsq00,rinv00);
320 /* Compute parameters for interactions between i and j atoms */
321 qq00 = _mm_mul_pd(iq0,jq0);
322 gmx_mm_load_1pair_swizzle_pd(vdwparam+vdwioffset0+vdwjidx0A,&c6_00,&c12_00);
324 c6grid_00 = gmx_mm_load_1real_pd(vdwgridparam+vdwioffset0+vdwjidx0A);
326 /* EWALD ELECTROSTATICS */
328 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
329 ewrt = _mm_mul_pd(r00,ewtabscale);
330 ewitab = _mm_cvttpd_epi32(ewrt);
331 eweps = _mm_sub_pd(ewrt,_mm_round_pd(ewrt, _MM_FROUND_FLOOR));
332 ewitab = _mm_slli_epi32(ewitab,2);
/* Only lane 0 is loaded from the table; lane 1 is filled with zero */
333 ewtabF = _mm_load_pd( ewtab + gmx_mm_extract_epi32(ewitab,0) );
334 ewtabD = _mm_setzero_pd();
335 GMX_MM_TRANSPOSE2_PD(ewtabF,ewtabD);
336 ewtabV = _mm_load_sd( ewtab + gmx_mm_extract_epi32(ewitab,0) +2);
337 ewtabFn = _mm_setzero_pd();
338 GMX_MM_TRANSPOSE2_PD(ewtabV,ewtabFn);
339 felec = _mm_add_pd(ewtabF,_mm_mul_pd(eweps,ewtabD));
340 velec = _mm_sub_pd(ewtabV,_mm_mul_pd(_mm_mul_pd(ewtabhalfspace,eweps),_mm_add_pd(ewtabF,felec)));
341 velec = _mm_mul_pd(qq00,_mm_sub_pd(_mm_sub_pd(rinv00,sh_ewald),velec));
342 felec = _mm_mul_pd(_mm_mul_pd(qq00,rinv00),_mm_sub_pd(rinvsq00,felec));
344 /* Analytical LJ-PME */
345 rinvsix = _mm_mul_pd(_mm_mul_pd(rinvsq00,rinvsq00),rinvsq00);
346 ewcljrsq = _mm_mul_pd(ewclj2,rsq00);
347 ewclj6 = _mm_mul_pd(ewclj2,_mm_mul_pd(ewclj2,ewclj2));
348 exponent = gmx_simd_exp_d(ewcljrsq);
349 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
350 poly = _mm_mul_pd(exponent,_mm_add_pd(_mm_sub_pd(one,ewcljrsq),_mm_mul_pd(_mm_mul_pd(ewcljrsq,ewcljrsq),one_half)));
351 /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
352 vvdw6 = _mm_mul_pd(_mm_sub_pd(c6_00,_mm_mul_pd(c6grid_00,_mm_sub_pd(one,poly))),rinvsix);
353 vvdw12 = _mm_mul_pd(c12_00,_mm_mul_pd(rinvsix,rinvsix));
354 vvdw = _mm_sub_pd(_mm_mul_pd( _mm_sub_pd(vvdw12 , _mm_mul_pd(c12_00,_mm_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))),one_twelfth),
355 _mm_mul_pd( _mm_sub_pd(vvdw6,_mm_add_pd(_mm_mul_pd(c6_00,sh_vdw_invrcut6),_mm_mul_pd(c6grid_00,sh_lj_ewald))),one_sixth));
356 /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
357 fvdw = _mm_mul_pd(_mm_sub_pd(vvdw12,_mm_sub_pd(vvdw6,_mm_mul_pd(_mm_mul_pd(c6grid_00,one_sixth),_mm_mul_pd(exponent,ewclj6)))),rinvsq00);
359 cutoff_mask = _mm_cmplt_pd(rsq00,rcutoff2);
361 /* Update potential sum for this i atom from the interaction with this j atom. */
362 velec = _mm_and_pd(velec,cutoff_mask);
/* unpacklo with zero clears the unused upper lane so only lane 0 accumulates */
363 velec = _mm_unpacklo_pd(velec,_mm_setzero_pd());
364 velecsum = _mm_add_pd(velecsum,velec);
365 vvdw = _mm_and_pd(vvdw,cutoff_mask);
366 vvdw = _mm_unpacklo_pd(vvdw,_mm_setzero_pd());
367 vvdwsum = _mm_add_pd(vvdwsum,vvdw);
369 fscal = _mm_add_pd(felec,fvdw);
371 fscal = _mm_and_pd(fscal,cutoff_mask);
/* Zero the upper lane of the scalar force as well */
373 fscal = _mm_unpacklo_pd(fscal,_mm_setzero_pd());
375 /* Calculate temporary vectorial force */
376 tx = _mm_mul_pd(fscal,dx00);
377 ty = _mm_mul_pd(fscal,dy00);
378 tz = _mm_mul_pd(fscal,dz00);
380 /* Update vectorial force */
381 fix0 = _mm_add_pd(fix0,tx);
382 fiy0 = _mm_add_pd(fiy0,ty);
383 fiz0 = _mm_add_pd(fiz0,tz);
385 gmx_mm_decrement_1rvec_1ptr_swizzle_pd(f+j_coord_offsetA,tx,ty,tz);
389 /* Inner loop uses 81 flops */
392 /* End of innermost loop */
/* Reduce the i-atom force accumulators into the force and shift-force arrays */
394 gmx_mm_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
395 f+i_coord_offset,fshift+i_shift_offset);
398 /* Update potential energies */
399 gmx_mm_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
400 gmx_mm_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
402 /* Increment number of inner iterations */
403 inneriter += j_index_end - j_index_start;
405 /* Outer loop uses 9 flops */
408 /* Increment number of outer iterations */
411 /* Update outer/inner flops */
413 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*9 + inneriter*81);
416 * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJEwSh_GeomP1P1_F_sse4_1_double
417 * Electrostatics interaction: Ewald
418 * VdW interaction: LJEwald
419 * Geometry: Particle-Particle
420 * Calculate force/pot: Force
/*
 * Force-only (F) variant of the kernel above: identical interactions
 * (potential-shifted Ewald electrostatics + analytical LJ-PME) but it
 * skips all energy accumulation, so it can use the cheaper linear
 * Coulomb force table and needs fewer flops per pair (62 vs 81).
 * Parameters are the same as for the VF kernel.
 */
423 nb_kernel_ElecEwSh_VdwLJEwSh_GeomP1P1_F_sse4_1_double
424 (t_nblist * gmx_restrict nlist,
425 rvec * gmx_restrict xx,
426 rvec * gmx_restrict ff,
427 t_forcerec * gmx_restrict fr,
428 t_mdatoms * gmx_restrict mdatoms,
429 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
430 t_nrnb * gmx_restrict nrnb)
432 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
433 * just 0 for non-waters.
434 * Suffixes A,B refer to j loop unrolling done with SSE double precision, e.g. for the two different
435 * jnr indices corresponding to data put in the four positions in the SIMD register.
437 int i_shift_offset,i_coord_offset,outeriter,inneriter;
438 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
440 int j_coord_offsetA,j_coord_offsetB;
441 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
443 real *shiftvec,*fshift,*x,*f;
444 __m128d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
446 __m128d ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
447 int vdwjidx0A,vdwjidx0B;
448 __m128d jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
449 __m128d dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
450 __m128d velec,felec,velecsum,facel,crf,krf,krf2;
453 __m128d rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
456 __m128d one_sixth = _mm_set1_pd(1.0/6.0);
457 __m128d one_twelfth = _mm_set1_pd(1.0/12.0);
459 __m128d ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
461 __m128d one_half = _mm_set1_pd(0.5);
462 __m128d minus_one = _mm_set1_pd(-1.0);
464 __m128d ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
466 __m128d dummy_mask,cutoff_mask;
467 __m128d signbit = gmx_mm_castsi128_pd( _mm_set_epi32(0x80000000,0x00000000,0x80000000,0x00000000) );
468 __m128d one = _mm_set1_pd(1.0);
469 __m128d two = _mm_set1_pd(2.0);
475 jindex = nlist->jindex;
477 shiftidx = nlist->shift;
479 shiftvec = fr->shift_vec[0];
480 fshift = fr->fshift[0];
481 facel = _mm_set1_pd(fr->epsfac);
482 charge = mdatoms->chargeA;
483 nvdwtype = fr->ntype;
485 vdwtype = mdatoms->typeA;
486 vdwgridparam = fr->ljpme_c6grid;
487 sh_lj_ewald = _mm_set1_pd(fr->ic->sh_lj_ewald);
488 ewclj = _mm_set1_pd(fr->ewaldcoeff_lj);
/* ewclj2 = -beta^2 so that exp(ewclj2*r^2) = exp(-(beta r)^2) below */
489 ewclj2 = _mm_mul_pd(minus_one,_mm_mul_pd(ewclj,ewclj));
491 sh_ewald = _mm_set1_pd(fr->ic->sh_ewald);
/* Force-only kernel: use the linear force-only Coulomb table */
492 ewtab = fr->ic->tabq_coul_F;
493 ewtabscale = _mm_set1_pd(fr->ic->tabq_scale);
494 ewtabhalfspace = _mm_set1_pd(0.5/fr->ic->tabq_scale);
496 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
497 rcutoff_scalar = fr->rcoulomb;
498 rcutoff = _mm_set1_pd(rcutoff_scalar);
499 rcutoff2 = _mm_mul_pd(rcutoff,rcutoff);
501 sh_vdw_invrcut6 = _mm_set1_pd(fr->ic->sh_invrc6);
502 rvdw = _mm_set1_pd(fr->rvdw);
504 /* Avoid stupid compiler warnings */
512 /* Start outer loop over neighborlists */
513 for(iidx=0; iidx<nri; iidx++)
515 /* Load shift vector for this list */
516 i_shift_offset = DIM*shiftidx[iidx];
518 /* Load limits for loop over neighbors */
519 j_index_start = jindex[iidx];
520 j_index_end = jindex[iidx+1];
522 /* Get outer coordinate index */
524 i_coord_offset = DIM*inr;
526 /* Load i particle coords and add shift vector */
527 gmx_mm_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);
529 fix0 = _mm_setzero_pd();
530 fiy0 = _mm_setzero_pd();
531 fiz0 = _mm_setzero_pd();
533 /* Load parameters for i particles */
534 iq0 = _mm_mul_pd(facel,_mm_load1_pd(charge+inr+0));
535 vdwioffset0 = 2*nvdwtype*vdwtype[inr+0];
537 /* Start inner kernel loop */
/* 2-way unrolled: SIMD lanes A and B hold two consecutive j atoms per iteration */
538 for(jidx=j_index_start; jidx<j_index_end-1; jidx+=2)
541 /* Get j neighbor index, and coordinate index */
544 j_coord_offsetA = DIM*jnrA;
545 j_coord_offsetB = DIM*jnrB;
547 /* load j atom coordinates */
548 gmx_mm_load_1rvec_2ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
551 /* Calculate displacement vector */
552 dx00 = _mm_sub_pd(ix0,jx0);
553 dy00 = _mm_sub_pd(iy0,jy0);
554 dz00 = _mm_sub_pd(iz0,jz0);
556 /* Calculate squared distance and things based on it */
557 rsq00 = gmx_mm_calc_rsq_pd(dx00,dy00,dz00);
559 rinv00 = gmx_mm_invsqrt_pd(rsq00);
561 rinvsq00 = _mm_mul_pd(rinv00,rinv00);
563 /* Load parameters for j particles */
564 jq0 = gmx_mm_load_2real_swizzle_pd(charge+jnrA+0,charge+jnrB+0);
565 vdwjidx0A = 2*vdwtype[jnrA+0];
566 vdwjidx0B = 2*vdwtype[jnrB+0];
568 /**************************
569 * CALCULATE INTERACTIONS *
570 **************************/
/* Skip the whole pair block only if BOTH lanes are outside the cutoff */
572 if (gmx_mm_any_lt(rsq00,rcutoff2))
575 r00 = _mm_mul_pd(rsq00,rinv00);
577 /* Compute parameters for interactions between i and j atoms */
578 qq00 = _mm_mul_pd(iq0,jq0);
579 gmx_mm_load_2pair_swizzle_pd(vdwparam+vdwioffset0+vdwjidx0A,
580 vdwparam+vdwioffset0+vdwjidx0B,&c6_00,&c12_00);
581 c6grid_00 = gmx_mm_load_2real_swizzle_pd(vdwgridparam+vdwioffset0+vdwjidx0A,
582 vdwgridparam+vdwioffset0+vdwjidx0B);
584 /* EWALD ELECTROSTATICS */
586 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
587 ewrt = _mm_mul_pd(r00,ewtabscale);
588 ewitab = _mm_cvttpd_epi32(ewrt);
589 eweps = _mm_sub_pd(ewrt,_mm_round_pd(ewrt, _MM_FROUND_FLOOR));
590 gmx_mm_load_2pair_swizzle_pd(ewtab+gmx_mm_extract_epi32(ewitab,0),ewtab+gmx_mm_extract_epi32(ewitab,1),
/* Linear interpolation in the force table: F = (1-eps)*F[i] + eps*F[i+1] */
592 felec = _mm_add_pd(_mm_mul_pd( _mm_sub_pd(one,eweps),ewtabF),_mm_mul_pd(eweps,ewtabFn));
593 felec = _mm_mul_pd(_mm_mul_pd(qq00,rinv00),_mm_sub_pd(rinvsq00,felec));
595 /* Analytical LJ-PME */
596 rinvsix = _mm_mul_pd(_mm_mul_pd(rinvsq00,rinvsq00),rinvsq00);
597 ewcljrsq = _mm_mul_pd(ewclj2,rsq00);
598 ewclj6 = _mm_mul_pd(ewclj2,_mm_mul_pd(ewclj2,ewclj2));
599 exponent = gmx_simd_exp_d(ewcljrsq);
600 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
601 poly = _mm_mul_pd(exponent,_mm_add_pd(_mm_sub_pd(one,ewcljrsq),_mm_mul_pd(_mm_mul_pd(ewcljrsq,ewcljrsq),one_half)));
602 /* f6A = 6 * C6grid * (1 - poly) */
603 f6A = _mm_mul_pd(c6grid_00,_mm_sub_pd(one,poly));
604 /* f6B = C6grid * exponent * beta^6 */
605 f6B = _mm_mul_pd(_mm_mul_pd(c6grid_00,one_sixth),_mm_mul_pd(exponent,ewclj6));
606 /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
607 fvdw = _mm_mul_pd(_mm_add_pd(_mm_mul_pd(_mm_sub_pd(_mm_mul_pd(c12_00,rinvsix),_mm_sub_pd(c6_00,f6A)),rinvsix),f6B),rinvsq00);
/* Per-lane mask: true where rsq < rcutoff^2; out-of-range lanes contribute nothing */
609 cutoff_mask = _mm_cmplt_pd(rsq00,rcutoff2);
611 fscal = _mm_add_pd(felec,fvdw);
613 fscal = _mm_and_pd(fscal,cutoff_mask);
615 /* Calculate temporary vectorial force */
616 tx = _mm_mul_pd(fscal,dx00);
617 ty = _mm_mul_pd(fscal,dy00);
618 tz = _mm_mul_pd(fscal,dz00);
620 /* Update vectorial force */
621 fix0 = _mm_add_pd(fix0,tx);
622 fiy0 = _mm_add_pd(fiy0,ty);
623 fiz0 = _mm_add_pd(fiz0,tz);
/* Newton's third law: subtract the same force from both j atoms */
625 gmx_mm_decrement_1rvec_2ptr_swizzle_pd(f+j_coord_offsetA,f+j_coord_offsetB,tx,ty,tz);
629 /* Inner loop uses 62 flops */
/* Epilogue: single remaining j atom for odd neighbor counts; only SIMD
 * lane 0 carries valid data and the upper lane is zeroed before use. */
636 j_coord_offsetA = DIM*jnrA;
638 /* load j atom coordinates */
639 gmx_mm_load_1rvec_1ptr_swizzle_pd(x+j_coord_offsetA,
642 /* Calculate displacement vector */
643 dx00 = _mm_sub_pd(ix0,jx0);
644 dy00 = _mm_sub_pd(iy0,jy0);
645 dz00 = _mm_sub_pd(iz0,jz0);
647 /* Calculate squared distance and things based on it */
648 rsq00 = gmx_mm_calc_rsq_pd(dx00,dy00,dz00);
650 rinv00 = gmx_mm_invsqrt_pd(rsq00);
652 rinvsq00 = _mm_mul_pd(rinv00,rinv00);
654 /* Load parameters for j particles */
655 jq0 = _mm_load_sd(charge+jnrA+0);
656 vdwjidx0A = 2*vdwtype[jnrA+0];
658 /**************************
659 * CALCULATE INTERACTIONS *
660 **************************/
662 if (gmx_mm_any_lt(rsq00,rcutoff2))
665 r00 = _mm_mul_pd(rsq00,rinv00);
667 /* Compute parameters for interactions between i and j atoms */
668 qq00 = _mm_mul_pd(iq0,jq0);
669 gmx_mm_load_1pair_swizzle_pd(vdwparam+vdwioffset0+vdwjidx0A,&c6_00,&c12_00);
671 c6grid_00 = gmx_mm_load_1real_pd(vdwgridparam+vdwioffset0+vdwjidx0A);
673 /* EWALD ELECTROSTATICS */
675 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
676 ewrt = _mm_mul_pd(r00,ewtabscale);
677 ewitab = _mm_cvttpd_epi32(ewrt);
678 eweps = _mm_sub_pd(ewrt,_mm_round_pd(ewrt, _MM_FROUND_FLOOR));
679 gmx_mm_load_1pair_swizzle_pd(ewtab+gmx_mm_extract_epi32(ewitab,0),&ewtabF,&ewtabFn);
680 felec = _mm_add_pd(_mm_mul_pd( _mm_sub_pd(one,eweps),ewtabF),_mm_mul_pd(eweps,ewtabFn));
681 felec = _mm_mul_pd(_mm_mul_pd(qq00,rinv00),_mm_sub_pd(rinvsq00,felec));
683 /* Analytical LJ-PME */
684 rinvsix = _mm_mul_pd(_mm_mul_pd(rinvsq00,rinvsq00),rinvsq00);
685 ewcljrsq = _mm_mul_pd(ewclj2,rsq00);
686 ewclj6 = _mm_mul_pd(ewclj2,_mm_mul_pd(ewclj2,ewclj2));
687 exponent = gmx_simd_exp_d(ewcljrsq);
688 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
689 poly = _mm_mul_pd(exponent,_mm_add_pd(_mm_sub_pd(one,ewcljrsq),_mm_mul_pd(_mm_mul_pd(ewcljrsq,ewcljrsq),one_half)));
690 /* f6A = 6 * C6grid * (1 - poly) */
691 f6A = _mm_mul_pd(c6grid_00,_mm_sub_pd(one,poly));
692 /* f6B = C6grid * exponent * beta^6 */
693 f6B = _mm_mul_pd(_mm_mul_pd(c6grid_00,one_sixth),_mm_mul_pd(exponent,ewclj6));
694 /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
695 fvdw = _mm_mul_pd(_mm_add_pd(_mm_mul_pd(_mm_sub_pd(_mm_mul_pd(c12_00,rinvsix),_mm_sub_pd(c6_00,f6A)),rinvsix),f6B),rinvsq00);
697 cutoff_mask = _mm_cmplt_pd(rsq00,rcutoff2);
699 fscal = _mm_add_pd(felec,fvdw);
701 fscal = _mm_and_pd(fscal,cutoff_mask);
/* unpacklo with zero clears the unused upper lane so only lane 0 contributes */
703 fscal = _mm_unpacklo_pd(fscal,_mm_setzero_pd());
705 /* Calculate temporary vectorial force */
706 tx = _mm_mul_pd(fscal,dx00);
707 ty = _mm_mul_pd(fscal,dy00);
708 tz = _mm_mul_pd(fscal,dz00);
710 /* Update vectorial force */
711 fix0 = _mm_add_pd(fix0,tx);
712 fiy0 = _mm_add_pd(fiy0,ty);
713 fiz0 = _mm_add_pd(fiz0,tz);
715 gmx_mm_decrement_1rvec_1ptr_swizzle_pd(f+j_coord_offsetA,tx,ty,tz);
719 /* Inner loop uses 62 flops */
722 /* End of innermost loop */
/* Reduce the i-atom force accumulators into the force and shift-force arrays */
724 gmx_mm_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
725 f+i_coord_offset,fshift+i_shift_offset);
727 /* Increment number of inner iterations */
728 inneriter += j_index_end - j_index_start;
730 /* Outer loop uses 7 flops */
733 /* Increment number of outer iterations */
736 /* Update outer/inner flops */
738 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*62);