/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2009, The GROMACS Development Team
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/* This is the innermost loop contents for the 4 x N atom SIMD kernel.
 * This flavor of the kernel calculates interactions of 4 i-atoms
 * with N j-atoms stored in N-wide SIMD registers.
 */
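/* Overview sketch (editorial note, derived from the code below): per
 * j-cluster iteration the kernel computes, for all 4 x UNROLLJ i,j pairs
 * at once, roughly the scalar equivalent of:
 *
 *     rsq   = dx*dx + dy*dy + dz*dz;
 *     rinv  = 1/sqrt(rsq);                    masked by cut-off/exclusions
 *     fscal = (frcoul + FrLJ12 - FrLJ6)*rinv*rinv;
 *     f[i] += fscal*(dx,dy,dz);   f[j] -= fscal*(dx,dy,dz);
 *
 * where frcoul is the Coulomb force times r and FrLJ6/FrLJ12 are the
 * r^-6 and r^-12 Lennard-Jones terms.
 */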
/* When calculating RF or Ewald interactions we calculate the electrostatic
 * forces on excluded atom pairs here in the non-bonded loops.
 * But when energies and/or the virial are required we calculate them
 * separately, since it is then easier to separate the energy and virial
 * contributions.
 */
#if defined CHECK_EXCLS && defined CALC_COULOMB
#define EXCL_FORCES
#endif
/* Without exclusions and energies we only need to mask the cut-off,
 * this can be faster with blendv (only available with SSE4.1 and later).
 */
#if !(defined CHECK_EXCLS || defined CALC_ENERGIES) && defined GMX_X86_SSE4_1 && !defined COUNT_PAIRS
/* With RF and tabulated Coulomb we replace cmp+and with sub+blendv.
 * With gcc this is slower, except for RF on Sandy Bridge.
 * Tested with gcc 4.6.2, 4.6.3 and 4.7.1.
 */
#if (defined CALC_COUL_RF || defined CALC_COUL_TAB) && (!defined __GNUC__ || (defined CALC_COUL_RF && defined GMX_X86_AVX_256))
#define CUTOFF_BLENDV
#endif
/* With analytical Ewald we replace cmp+and+and with sub+blendv+blendv.
 * This is only faster with icc on Sandy Bridge (PS kernel slower than gcc 4.7).
 */
#if defined CALC_COUL_EWALD && defined __INTEL_COMPILER && defined GMX_X86_AVX_256
#define CUTOFF_BLENDV
#endif
#endif
{
    int        cj, aj, ajx, ajy, ajz;

#ifdef ENERGY_GROUPS
    /* Energy group indices for two atoms packed into one int */
    int        egp_jj[UNROLLJ/2];
#endif
#ifdef CHECK_EXCLS
    /* Interaction (non-exclusion) mask of all 1's or 0's */
    gmx_mm_pr  int_SSE0;
    gmx_mm_pr  int_SSE1;
    gmx_mm_pr  int_SSE2;
    gmx_mm_pr  int_SSE3;
#endif
    gmx_mm_pr  jxSSE, jySSE, jzSSE;
    gmx_mm_pr  dx_SSE0, dy_SSE0, dz_SSE0;
    gmx_mm_pr  dx_SSE1, dy_SSE1, dz_SSE1;
    gmx_mm_pr  dx_SSE2, dy_SSE2, dz_SSE2;
    gmx_mm_pr  dx_SSE3, dy_SSE3, dz_SSE3;
    gmx_mm_pr  tx_SSE0, ty_SSE0, tz_SSE0;
    gmx_mm_pr  tx_SSE1, ty_SSE1, tz_SSE1;
    gmx_mm_pr  tx_SSE2, ty_SSE2, tz_SSE2;
    gmx_mm_pr  tx_SSE3, ty_SSE3, tz_SSE3;
    gmx_mm_pr  rsq_SSE0, rinv_SSE0, rinvsq_SSE0;
    gmx_mm_pr  rsq_SSE1, rinv_SSE1, rinvsq_SSE1;
    gmx_mm_pr  rsq_SSE2, rinv_SSE2, rinvsq_SSE2;
    gmx_mm_pr  rsq_SSE3, rinv_SSE3, rinvsq_SSE3;
#ifndef CUTOFF_BLENDV
    /* wco: within cut-off, mask of all 1's or 0's */
    gmx_mm_pr  wco_SSE0;
    gmx_mm_pr  wco_SSE1;
    gmx_mm_pr  wco_SSE2;
    gmx_mm_pr  wco_SSE3;
#endif
#ifdef VDW_CUTOFF_CHECK
    gmx_mm_pr  wco_vdw_SSE0;
    gmx_mm_pr  wco_vdw_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr  wco_vdw_SSE2;
    gmx_mm_pr  wco_vdw_SSE3;
#endif
#endif
#ifdef CHECK_EXCLS
    /* 1/r masked with the interaction mask */
    gmx_mm_pr  rinv_ex_SSE0;
    gmx_mm_pr  rinv_ex_SSE1;
    gmx_mm_pr  rinv_ex_SSE2;
    gmx_mm_pr  rinv_ex_SSE3;
#endif

#ifdef CALC_COULOMB
    /* The charge products for the i and j atoms */
    gmx_mm_pr  jq_SSE;
    gmx_mm_pr  qq_SSE0, qq_SSE1, qq_SSE2, qq_SSE3;

#ifdef CALC_COUL_TAB
    /* The force (PME mesh force) we need to subtract from 1/r^2 */
    gmx_mm_pr  fsub_SSE0;
    gmx_mm_pr  fsub_SSE1;
    gmx_mm_pr  fsub_SSE2;
    gmx_mm_pr  fsub_SSE3;
#endif
#ifdef CALC_COUL_EWALD
    gmx_mm_pr  brsq_SSE0, brsq_SSE1, brsq_SSE2, brsq_SSE3;
    gmx_mm_pr  ewcorr_SSE0, ewcorr_SSE1, ewcorr_SSE2, ewcorr_SSE3;
#endif
    /* frcoul = (1/r - fsub)*r */
    gmx_mm_pr  frcoul_SSE0;
    gmx_mm_pr  frcoul_SSE1;
    gmx_mm_pr  frcoul_SSE2;
    gmx_mm_pr  frcoul_SSE3;
#ifdef CALC_COUL_TAB
    /* For tables: r, rs=r/sp, rf=floor(rs), frac=rs-rf */
    gmx_mm_pr  r_SSE0, rs_SSE0, rf_SSE0, frac_SSE0;
    gmx_mm_pr  r_SSE1, rs_SSE1, rf_SSE1, frac_SSE1;
    gmx_mm_pr  r_SSE2, rs_SSE2, rf_SSE2, frac_SSE2;
    gmx_mm_pr  r_SSE3, rs_SSE3, rf_SSE3, frac_SSE3;
    /* Table index: rs truncated to an int */
#if !(defined GMX_MM256_HERE && defined GMX_DOUBLE)
    gmx_epi32  ti_SSE0, ti_SSE1, ti_SSE2, ti_SSE3;
#else
    __m128i    ti_SSE0, ti_SSE1, ti_SSE2, ti_SSE3;
#endif
    /* Linear force table values */
    gmx_mm_pr  ctab0_SSE0, ctab1_SSE0;
    gmx_mm_pr  ctab0_SSE1, ctab1_SSE1;
    gmx_mm_pr  ctab0_SSE2, ctab1_SSE2;
    gmx_mm_pr  ctab0_SSE3, ctab1_SSE3;
#ifdef CALC_ENERGIES
    /* Quadratic energy table value */
    gmx_mm_pr  ctabv_SSE0;
    gmx_mm_pr  ctabv_SSE1;
    gmx_mm_pr  ctabv_SSE2;
    gmx_mm_pr  ctabv_SSE3;
#endif
#endif
#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
    /* The potential (PME mesh potential) we need to subtract from 1/r */
    gmx_mm_pr  vc_sub_SSE0;
    gmx_mm_pr  vc_sub_SSE1;
    gmx_mm_pr  vc_sub_SSE2;
    gmx_mm_pr  vc_sub_SSE3;
#endif
#ifdef CALC_ENERGIES
    /* Electrostatic potential */
    gmx_mm_pr  vcoul_SSE0;
    gmx_mm_pr  vcoul_SSE1;
    gmx_mm_pr  vcoul_SSE2;
    gmx_mm_pr  vcoul_SSE3;
#endif
#endif /* CALC_COULOMB */
    /* The force times 1/r */
    gmx_mm_pr  fscal_SSE0;
    gmx_mm_pr  fscal_SSE1;
    gmx_mm_pr  fscal_SSE2;
    gmx_mm_pr  fscal_SSE3;
#ifdef CALC_LJ
#ifdef LJ_COMB_LB
    /* LJ sigma_j/2 and sqrt(epsilon_j) */
    gmx_mm_pr  hsig_j_SSE, seps_j_SSE;
    /* LJ sigma_ij and epsilon_ij */
    gmx_mm_pr  sig_SSE0, eps_SSE0;
    gmx_mm_pr  sig_SSE1, eps_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr  sig_SSE2, eps_SSE2;
    gmx_mm_pr  sig_SSE3, eps_SSE3;
#endif
#ifdef CALC_ENERGIES
    gmx_mm_pr  sig2_SSE0, sig6_SSE0;
    gmx_mm_pr  sig2_SSE1, sig6_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr  sig2_SSE2, sig6_SSE2;
    gmx_mm_pr  sig2_SSE3, sig6_SSE3;
#endif
#endif
#endif /* LJ_COMB_LB */
#ifdef LJ_COMB_GEOM
    gmx_mm_pr  c6s_j_SSE, c12s_j_SSE;
#endif
#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
    /* Index for loading LJ parameters, complicated when interleaving */
    int        aj2;
#endif
#ifndef FIX_LJ_C
    /* LJ C6 and C12 parameters, used with geometric comb. rule */
    gmx_mm_pr  c6_SSE0, c12_SSE0;
    gmx_mm_pr  c6_SSE1, c12_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr  c6_SSE2, c12_SSE2;
    gmx_mm_pr  c6_SSE3, c12_SSE3;
#endif
#endif
    /* Intermediate variables for LJ calculation */
#ifndef LJ_COMB_LB
    gmx_mm_pr  rinvsix_SSE0;
    gmx_mm_pr  rinvsix_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr  rinvsix_SSE2;
    gmx_mm_pr  rinvsix_SSE3;
#endif
#endif
#ifdef LJ_COMB_LB
    gmx_mm_pr  sir_SSE0, sir2_SSE0, sir6_SSE0;
    gmx_mm_pr  sir_SSE1, sir2_SSE1, sir6_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr  sir_SSE2, sir2_SSE2, sir6_SSE2;
    gmx_mm_pr  sir_SSE3, sir2_SSE3, sir6_SSE3;
#endif
#endif

    gmx_mm_pr  FrLJ6_SSE0, FrLJ12_SSE0;
    gmx_mm_pr  FrLJ6_SSE1, FrLJ12_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr  FrLJ6_SSE2, FrLJ12_SSE2;
    gmx_mm_pr  FrLJ6_SSE3, FrLJ12_SSE3;
#endif
#ifdef CALC_ENERGIES
    gmx_mm_pr  VLJ6_SSE0, VLJ12_SSE0, VLJ_SSE0;
    gmx_mm_pr  VLJ6_SSE1, VLJ12_SSE1, VLJ_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr  VLJ6_SSE2, VLJ12_SSE2, VLJ_SSE2;
    gmx_mm_pr  VLJ6_SSE3, VLJ12_SSE3, VLJ_SSE3;
#endif
#endif
#endif /* CALC_LJ */
    /* j-cluster index */
    cj            = l_cj[cjind].cj;

    /* Atom indices (of the first atom in the cluster) */
    aj            = cj*UNROLLJ;
#if defined CALC_LJ && (defined LJ_COMB_GEOM || defined LJ_COMB_LB)
#if UNROLLJ == STRIDE
    aj2           = aj*2;
#else
    aj2           = (cj>>1)*2*STRIDE + (cj & 1)*UNROLLJ;
#endif
#endif
#if UNROLLJ == STRIDE
    ajx           = aj*DIM;
#else
    ajx           = (cj>>1)*DIM*STRIDE + (cj & 1)*UNROLLJ;
#endif
    ajy           = ajx + STRIDE;
    ajz           = ajy + STRIDE;
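    /* Note (editorial, inferred from the index math above): when
     * UNROLLJ != STRIDE, two j-clusters share one DIM*STRIDE block of x,
     * so the load offset combines the cluster-pair index (cj>>1) with an
     * UNROLLJ offset for the odd cluster (cj & 1).
     */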
#ifdef CHECK_EXCLS
#if defined GMX_X86_SSE2 && defined GMX_MM128_HERE
    {
        /* Load integer interaction mask */
        __m128i mask_int = _mm_set1_epi32(l_cj[cjind].excl);

        int_SSE0  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int, mask0), zeroi_SSE));
        int_SSE1  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int, mask1), zeroi_SSE));
        int_SSE2  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int, mask2), zeroi_SSE));
        int_SSE3  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int, mask3), zeroi_SSE));
    }
#endif
#if defined GMX_X86_SSE2 && defined GMX_MM256_HERE
#ifndef GMX_DOUBLE
    {
        /* Load integer interaction mask */
        /* With AVX there are no integer operations, so cast to real */
        gmx_mm_pr mask_pr = gmx_mm_castsi256_pr(_mm256_set1_epi32(l_cj[cjind].excl));
        /* We can't compare all 4*8=32 float bits: shift the mask */
        gmx_mm_pr masksh_pr = gmx_mm_castsi256_pr(_mm256_set1_epi32(l_cj[cjind].excl>>(2*UNROLLJ)));
        /* Intel Compiler version 12.1.3 20120130 is buggy: use cast.
         * With gcc we don't need the cast, but it's faster anyway.
         */
#define cast_cvt(x)  _mm256_cvtepi32_ps(_mm256_castps_si256(x))
        int_SSE0  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr, mask0)), zero_SSE);
        int_SSE1  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr, mask1)), zero_SSE);
        int_SSE2  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(masksh_pr, mask0)), zero_SSE);
        int_SSE3  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(masksh_pr, mask1)), zero_SSE);
#undef cast_cvt
    }
#else
    {
        /* Load integer interaction mask */
        /* With AVX there are no integer operations,
         * and there is no int to double conversion, so cast to float
         */
        __m256 mask_ps = _mm256_castsi256_ps(_mm256_set1_epi32(l_cj[cjind].excl));
#define cast_cvt(x)  _mm256_castps_pd(_mm256_cvtepi32_ps(_mm256_castps_si256(x)))
        int_SSE0  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps, mask0)), zero_SSE);
        int_SSE1  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps, mask1)), zero_SSE);
        int_SSE2  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps, mask2)), zero_SSE);
        int_SSE3  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps, mask3)), zero_SSE);
#undef cast_cvt
    }
#endif
#endif
#endif /* CHECK_EXCLS */
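    /* Note (editorial): l_cj[cjind].excl holds one bit per i,j pair of
     * this cluster pair (1 = interacting, 0 = excluded). The compares
     * above expand those bits into full-width all-1s/all-0s lane masks,
     * so exclusions can be applied with cheap bitwise and/andnot
     * operations on reals.
     */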
    /* Load j atom coordinates */
    jxSSE         = gmx_load_pr(x+ajx);
    jySSE         = gmx_load_pr(x+ajy);
    jzSSE         = gmx_load_pr(x+ajz);
    /* Calculate distance */
    dx_SSE0       = gmx_sub_pr(ix_SSE0, jxSSE);
    dy_SSE0       = gmx_sub_pr(iy_SSE0, jySSE);
    dz_SSE0       = gmx_sub_pr(iz_SSE0, jzSSE);
    dx_SSE1       = gmx_sub_pr(ix_SSE1, jxSSE);
    dy_SSE1       = gmx_sub_pr(iy_SSE1, jySSE);
    dz_SSE1       = gmx_sub_pr(iz_SSE1, jzSSE);
    dx_SSE2       = gmx_sub_pr(ix_SSE2, jxSSE);
    dy_SSE2       = gmx_sub_pr(iy_SSE2, jySSE);
    dz_SSE2       = gmx_sub_pr(iz_SSE2, jzSSE);
    dx_SSE3       = gmx_sub_pr(ix_SSE3, jxSSE);
    dy_SSE3       = gmx_sub_pr(iy_SSE3, jySSE);
    dz_SSE3       = gmx_sub_pr(iz_SSE3, jzSSE);

    /* rsq = dx*dx+dy*dy+dz*dz */
    rsq_SSE0      = gmx_calc_rsq_pr(dx_SSE0, dy_SSE0, dz_SSE0);
    rsq_SSE1      = gmx_calc_rsq_pr(dx_SSE1, dy_SSE1, dz_SSE1);
    rsq_SSE2      = gmx_calc_rsq_pr(dx_SSE2, dy_SSE2, dz_SSE2);
    rsq_SSE3      = gmx_calc_rsq_pr(dx_SSE3, dy_SSE3, dz_SSE3);
#ifndef CUTOFF_BLENDV
    wco_SSE0      = gmx_cmplt_pr(rsq_SSE0, rc2_SSE);
    wco_SSE1      = gmx_cmplt_pr(rsq_SSE1, rc2_SSE);
    wco_SSE2      = gmx_cmplt_pr(rsq_SSE2, rc2_SSE);
    wco_SSE3      = gmx_cmplt_pr(rsq_SSE3, rc2_SSE);
#endif
#ifdef CHECK_EXCLS
#ifdef EXCL_FORCES
    /* Only remove the (sub-)diagonal to avoid double counting */
#if UNROLLJ == UNROLLI
    if (cj == ci_sh)
    {
        wco_SSE0  = gmx_and_pr(wco_SSE0, diag_SSE0);
        wco_SSE1  = gmx_and_pr(wco_SSE1, diag_SSE1);
        wco_SSE2  = gmx_and_pr(wco_SSE2, diag_SSE2);
        wco_SSE3  = gmx_and_pr(wco_SSE3, diag_SSE3);
    }
#endif
#if UNROLLJ < UNROLLI
    if (cj == ci_sh*2)
    {
        wco_SSE0  = gmx_and_pr(wco_SSE0, diag0_SSE0);
        wco_SSE1  = gmx_and_pr(wco_SSE1, diag0_SSE1);
        wco_SSE2  = gmx_and_pr(wco_SSE2, diag0_SSE2);
        wco_SSE3  = gmx_and_pr(wco_SSE3, diag0_SSE3);
    }
    if (cj == ci_sh*2 + 1)
    {
        wco_SSE0  = gmx_and_pr(wco_SSE0, diag1_SSE0);
        wco_SSE1  = gmx_and_pr(wco_SSE1, diag1_SSE1);
        wco_SSE2  = gmx_and_pr(wco_SSE2, diag1_SSE2);
        wco_SSE3  = gmx_and_pr(wco_SSE3, diag1_SSE3);
    }
#endif
#if UNROLLJ > UNROLLI
    if (cj*2 == ci_sh)
    {
        wco_SSE0  = gmx_and_pr(wco_SSE0, diag0_SSE0);
        wco_SSE1  = gmx_and_pr(wco_SSE1, diag0_SSE1);
        wco_SSE2  = gmx_and_pr(wco_SSE2, diag0_SSE2);
        wco_SSE3  = gmx_and_pr(wco_SSE3, diag0_SSE3);
    }
    else if (cj*2 + 1 == ci_sh)
    {
        wco_SSE0  = gmx_and_pr(wco_SSE0, diag1_SSE0);
        wco_SSE1  = gmx_and_pr(wco_SSE1, diag1_SSE1);
        wco_SSE2  = gmx_and_pr(wco_SSE2, diag1_SSE2);
        wco_SSE3  = gmx_and_pr(wco_SSE3, diag1_SSE3);
    }
#endif
#else /* EXCL_FORCES */
    /* Remove all excluded atom pairs from the list */
    wco_SSE0      = gmx_and_pr(wco_SSE0, int_SSE0);
    wco_SSE1      = gmx_and_pr(wco_SSE1, int_SSE1);
    wco_SSE2      = gmx_and_pr(wco_SSE2, int_SSE2);
    wco_SSE3      = gmx_and_pr(wco_SSE3, int_SSE3);
#endif
#endif
#ifdef COUNT_PAIRS
    {
        int  i, j;
        real tmp[UNROLLJ];

        for (i = 0; i < UNROLLI; i++)
        {
            gmx_storeu_pr(tmp, i == 0 ? wco_SSE0 : (i == 1 ? wco_SSE1 : (i == 2 ? wco_SSE2 : wco_SSE3)));
            for (j = 0; j < UNROLLJ; j++)
            {
                if (tmp[j] != 0)
                {
                    npair++;
                }
            }
        }
    }
#endif
#ifdef CHECK_EXCLS
    /* For excluded pairs add a small number to avoid r^-6 = NaN */
    rsq_SSE0      = gmx_add_pr(rsq_SSE0, gmx_andnot_pr(int_SSE0, avoid_sing_SSE));
    rsq_SSE1      = gmx_add_pr(rsq_SSE1, gmx_andnot_pr(int_SSE1, avoid_sing_SSE));
    rsq_SSE2      = gmx_add_pr(rsq_SSE2, gmx_andnot_pr(int_SSE2, avoid_sing_SSE));
    rsq_SSE3      = gmx_add_pr(rsq_SSE3, gmx_andnot_pr(int_SSE3, avoid_sing_SSE));
#endif
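    /* Note (editorial): gmx_andnot_pr(a, b) computes (~a) & b, so the
     * small constant is added only where the interaction mask is all
     * zeros, i.e. only for excluded pairs; interacting pairs keep their
     * exact rsq.
     */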
    /* Calculate 1/r */
#ifndef GMX_DOUBLE
    rinv_SSE0     = gmx_invsqrt_pr(rsq_SSE0);
    rinv_SSE1     = gmx_invsqrt_pr(rsq_SSE1);
    rinv_SSE2     = gmx_invsqrt_pr(rsq_SSE2);
    rinv_SSE3     = gmx_invsqrt_pr(rsq_SSE3);
#else
    GMX_MM_INVSQRT2_PD(rsq_SSE0, rsq_SSE1, rinv_SSE0, rinv_SSE1);
    GMX_MM_INVSQRT2_PD(rsq_SSE2, rsq_SSE3, rinv_SSE2, rinv_SSE3);
#endif
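    /* Note (editorial): in single precision gmx_invsqrt_pr is a hardware
     * rsqrt estimate refined with a Newton-Raphson step, essentially
     *     y' = y*(1.5 - 0.5*rsq*y*y);
     * GMX_MM_INVSQRT2_PD refines two double-precision registers together.
     */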
#ifdef CALC_COULOMB
    /* Load parameters for j atom */
    jq_SSE        = gmx_load_pr(q+aj);
    qq_SSE0       = gmx_mul_pr(iq_SSE0, jq_SSE);
    qq_SSE1       = gmx_mul_pr(iq_SSE1, jq_SSE);
    qq_SSE2       = gmx_mul_pr(iq_SSE2, jq_SSE);
    qq_SSE3       = gmx_mul_pr(iq_SSE3, jq_SSE);
#endif
#ifdef CALC_LJ

#if !defined LJ_COMB_GEOM && !defined LJ_COMB_LB && !defined FIX_LJ_C
    load_lj_pair_params(nbfp0, type, aj, c6_SSE0, c12_SSE0);
    load_lj_pair_params(nbfp1, type, aj, c6_SSE1, c12_SSE1);
#ifndef HALF_LJ
    load_lj_pair_params(nbfp2, type, aj, c6_SSE2, c12_SSE2);
    load_lj_pair_params(nbfp3, type, aj, c6_SSE3, c12_SSE3);
#endif
#endif /* not defined any LJ rule */
#ifdef LJ_COMB_GEOM
    c6s_j_SSE     = gmx_load_pr(ljc+aj2+0);
    c12s_j_SSE    = gmx_load_pr(ljc+aj2+STRIDE);
    c6_SSE0       = gmx_mul_pr(c6s_SSE0, c6s_j_SSE);
    c6_SSE1       = gmx_mul_pr(c6s_SSE1, c6s_j_SSE);
#ifndef HALF_LJ
    c6_SSE2       = gmx_mul_pr(c6s_SSE2, c6s_j_SSE);
    c6_SSE3       = gmx_mul_pr(c6s_SSE3, c6s_j_SSE);
#endif
    c12_SSE0      = gmx_mul_pr(c12s_SSE0, c12s_j_SSE);
    c12_SSE1      = gmx_mul_pr(c12s_SSE1, c12s_j_SSE);
#ifndef HALF_LJ
    c12_SSE2      = gmx_mul_pr(c12s_SSE2, c12s_j_SSE);
    c12_SSE3      = gmx_mul_pr(c12s_SSE3, c12s_j_SSE);
#endif
#endif /* LJ_COMB_GEOM */
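    /* Note (editorial): geometric combination rule,
     * C6ij = sqrt(C6i*C6j) and C12ij = sqrt(C12i*C12j). The per-atom
     * values in ljc are stored as square roots (hence the names c6s,
     * c12s), so a single multiply yields each pair parameter.
     */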
#ifdef LJ_COMB_LB
    hsig_j_SSE    = gmx_load_pr(ljc+aj2+0);
    seps_j_SSE    = gmx_load_pr(ljc+aj2+STRIDE);

    sig_SSE0      = gmx_add_pr(hsig_i_SSE0, hsig_j_SSE);
    sig_SSE1      = gmx_add_pr(hsig_i_SSE1, hsig_j_SSE);
    eps_SSE0      = gmx_mul_pr(seps_i_SSE0, seps_j_SSE);
    eps_SSE1      = gmx_mul_pr(seps_i_SSE1, seps_j_SSE);
#ifndef HALF_LJ
    sig_SSE2      = gmx_add_pr(hsig_i_SSE2, hsig_j_SSE);
    sig_SSE3      = gmx_add_pr(hsig_i_SSE3, hsig_j_SSE);
    eps_SSE2      = gmx_mul_pr(seps_i_SSE2, seps_j_SSE);
    eps_SSE3      = gmx_mul_pr(seps_i_SSE3, seps_j_SSE);
#endif
#endif /* LJ_COMB_LB */

#endif /* CALC_LJ */
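    /* Note (editorial): Lorentz-Berthelot combination,
     * sigma_ij = 0.5*(sigma_i + sigma_j) and eps_ij = sqrt(eps_i*eps_j).
     * ljc stores half-sigmas (hsig) and sqrt-epsilons (seps), possibly
     * with constant prefactors folded in, so each pair costs one add and
     * one multiply.
     */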
#ifndef CUTOFF_BLENDV
    rinv_SSE0     = gmx_and_pr(rinv_SSE0, wco_SSE0);
    rinv_SSE1     = gmx_and_pr(rinv_SSE1, wco_SSE1);
    rinv_SSE2     = gmx_and_pr(rinv_SSE2, wco_SSE2);
    rinv_SSE3     = gmx_and_pr(rinv_SSE3, wco_SSE3);
#else
    /* We only need to mask for the cut-off: blendv is faster */
    rinv_SSE0     = gmx_blendv_pr(rinv_SSE0, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE0));
    rinv_SSE1     = gmx_blendv_pr(rinv_SSE1, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE1));
    rinv_SSE2     = gmx_blendv_pr(rinv_SSE2, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE2));
    rinv_SSE3     = gmx_blendv_pr(rinv_SSE3, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE3));
#endif
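    /* Note (editorial): gmx_blendv_pr selects its second argument where
     * the sign bit of the third is set; rc2 - rsq is negative exactly
     * for pairs beyond the cut-off, so those lanes get rinv = 0.
     */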
    rinvsq_SSE0   = gmx_mul_pr(rinv_SSE0, rinv_SSE0);
    rinvsq_SSE1   = gmx_mul_pr(rinv_SSE1, rinv_SSE1);
    rinvsq_SSE2   = gmx_mul_pr(rinv_SSE2, rinv_SSE2);
    rinvsq_SSE3   = gmx_mul_pr(rinv_SSE3, rinv_SSE3);
#ifdef CALC_COULOMB
    /* Note that here we calculate force*r, not the usual force/r.
     * This allows avoiding masking the reaction-field contribution,
     * as frcoul is later multiplied by rinvsq which has been
     * masked with the cut-off check.
     */

    /* Only add 1/r for non-excluded atom pairs */
#ifdef EXCL_FORCES
    rinv_ex_SSE0  = gmx_and_pr(rinv_SSE0, int_SSE0);
    rinv_ex_SSE1  = gmx_and_pr(rinv_SSE1, int_SSE1);
    rinv_ex_SSE2  = gmx_and_pr(rinv_SSE2, int_SSE2);
    rinv_ex_SSE3  = gmx_and_pr(rinv_SSE3, int_SSE3);
#else
    /* No exclusion forces, we always need 1/r */
#define rinv_ex_SSE0    rinv_SSE0
#define rinv_ex_SSE1    rinv_SSE1
#define rinv_ex_SSE2    rinv_SSE2
#define rinv_ex_SSE3    rinv_SSE3
#endif

#ifdef CALC_COUL_RF
    /* Electrostatic interactions */
    frcoul_SSE0   = gmx_mul_pr(qq_SSE0, gmx_add_pr(rinv_ex_SSE0, gmx_mul_pr(rsq_SSE0, mrc_3_SSE)));
    frcoul_SSE1   = gmx_mul_pr(qq_SSE1, gmx_add_pr(rinv_ex_SSE1, gmx_mul_pr(rsq_SSE1, mrc_3_SSE)));
    frcoul_SSE2   = gmx_mul_pr(qq_SSE2, gmx_add_pr(rinv_ex_SSE2, gmx_mul_pr(rsq_SSE2, mrc_3_SSE)));
    frcoul_SSE3   = gmx_mul_pr(qq_SSE3, gmx_add_pr(rinv_ex_SSE3, gmx_mul_pr(rsq_SSE3, mrc_3_SSE)));

#ifdef CALC_ENERGIES
    vcoul_SSE0    = gmx_mul_pr(qq_SSE0, gmx_add_pr(rinv_ex_SSE0, gmx_add_pr(gmx_mul_pr(rsq_SSE0, hrc_3_SSE), moh_rc_SSE)));
    vcoul_SSE1    = gmx_mul_pr(qq_SSE1, gmx_add_pr(rinv_ex_SSE1, gmx_add_pr(gmx_mul_pr(rsq_SSE1, hrc_3_SSE), moh_rc_SSE)));
    vcoul_SSE2    = gmx_mul_pr(qq_SSE2, gmx_add_pr(rinv_ex_SSE2, gmx_add_pr(gmx_mul_pr(rsq_SSE2, hrc_3_SSE), moh_rc_SSE)));
    vcoul_SSE3    = gmx_mul_pr(qq_SSE3, gmx_add_pr(rinv_ex_SSE3, gmx_add_pr(gmx_mul_pr(rsq_SSE3, hrc_3_SSE), moh_rc_SSE)));
#endif
#endif /* CALC_COUL_RF */
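    /* Note (editorial), scalar form of the reaction-field expressions
     * above, assuming the usual constants set up by the including
     * kernel (mrc_3 = -2*k_rf, hrc_3 = k_rf, moh_rc = -c_rf):
     *     F*r = qq*(1/r - 2*k_rf*r^2)
     *     V   = qq*(1/r + k_rf*r^2 - c_rf)
     */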
#ifdef CALC_COUL_EWALD
    /* We need to mask (or limit) rsq for the cut-off,
     * as large distances can cause an overflow in gmx_pmecorrF/V.
     */
#ifndef CUTOFF_BLENDV
    brsq_SSE0     = gmx_mul_pr(beta2_SSE, gmx_and_pr(rsq_SSE0, wco_SSE0));
    brsq_SSE1     = gmx_mul_pr(beta2_SSE, gmx_and_pr(rsq_SSE1, wco_SSE1));
    brsq_SSE2     = gmx_mul_pr(beta2_SSE, gmx_and_pr(rsq_SSE2, wco_SSE2));
    brsq_SSE3     = gmx_mul_pr(beta2_SSE, gmx_and_pr(rsq_SSE3, wco_SSE3));
#else
    /* Strangely, putting mul on a separate line is slower (icc 13) */
    brsq_SSE0     = gmx_mul_pr(beta2_SSE, gmx_blendv_pr(rsq_SSE0, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE0)));
    brsq_SSE1     = gmx_mul_pr(beta2_SSE, gmx_blendv_pr(rsq_SSE1, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE1)));
    brsq_SSE2     = gmx_mul_pr(beta2_SSE, gmx_blendv_pr(rsq_SSE2, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE2)));
    brsq_SSE3     = gmx_mul_pr(beta2_SSE, gmx_blendv_pr(rsq_SSE3, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE3)));
#endif
    ewcorr_SSE0   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE0), beta_SSE);
    ewcorr_SSE1   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE1), beta_SSE);
    ewcorr_SSE2   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE2), beta_SSE);
    ewcorr_SSE3   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE3), beta_SSE);
    frcoul_SSE0   = gmx_mul_pr(qq_SSE0, gmx_add_pr(rinv_ex_SSE0, gmx_mul_pr(ewcorr_SSE0, brsq_SSE0)));
    frcoul_SSE1   = gmx_mul_pr(qq_SSE1, gmx_add_pr(rinv_ex_SSE1, gmx_mul_pr(ewcorr_SSE1, brsq_SSE1)));
    frcoul_SSE2   = gmx_mul_pr(qq_SSE2, gmx_add_pr(rinv_ex_SSE2, gmx_mul_pr(ewcorr_SSE2, brsq_SSE2)));
    frcoul_SSE3   = gmx_mul_pr(qq_SSE3, gmx_add_pr(rinv_ex_SSE3, gmx_mul_pr(ewcorr_SSE3, brsq_SSE3)));

#ifdef CALC_ENERGIES
    vc_sub_SSE0   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE0), beta_SSE);
    vc_sub_SSE1   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE1), beta_SSE);
    vc_sub_SSE2   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE2), beta_SSE);
    vc_sub_SSE3   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE3), beta_SSE);
#endif

#endif /* CALC_COUL_EWALD */
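    /* Note (editorial): with z = beta*r the net analytical-Ewald result
     * above is the real-space force times r,
     *     F*r = qq*(1/r - erf(z)/r + (2*beta/sqrt(pi))*exp(-z^2)),
     * i.e. plain Coulomb minus the PME mesh part; gmx_pmecorrF evaluates
     * a polynomial approximation of that correction as a function of
     * brsq = z^2.
     */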
#ifdef CALC_COUL_TAB
    /* Electrostatic interactions */
    r_SSE0        = gmx_mul_pr(rsq_SSE0, rinv_SSE0);
    r_SSE1        = gmx_mul_pr(rsq_SSE1, rinv_SSE1);
    r_SSE2        = gmx_mul_pr(rsq_SSE2, rinv_SSE2);
    r_SSE3        = gmx_mul_pr(rsq_SSE3, rinv_SSE3);
    /* Convert r to scaled table units */
    rs_SSE0       = gmx_mul_pr(r_SSE0, invtsp_SSE);
    rs_SSE1       = gmx_mul_pr(r_SSE1, invtsp_SSE);
    rs_SSE2       = gmx_mul_pr(r_SSE2, invtsp_SSE);
    rs_SSE3       = gmx_mul_pr(r_SSE3, invtsp_SSE);
    /* Truncate scaled r to an int */
    ti_SSE0       = gmx_cvttpr_epi32(rs_SSE0);
    ti_SSE1       = gmx_cvttpr_epi32(rs_SSE1);
    ti_SSE2       = gmx_cvttpr_epi32(rs_SSE2);
    ti_SSE3       = gmx_cvttpr_epi32(rs_SSE3);
#ifdef GMX_X86_SSE4_1
    /* SSE4.1 floor is faster than the gmx_cvtepi32_pr int->float cast */
    rf_SSE0       = gmx_floor_pr(rs_SSE0);
    rf_SSE1       = gmx_floor_pr(rs_SSE1);
    rf_SSE2       = gmx_floor_pr(rs_SSE2);
    rf_SSE3       = gmx_floor_pr(rs_SSE3);
#else
    rf_SSE0       = gmx_cvtepi32_pr(ti_SSE0);
    rf_SSE1       = gmx_cvtepi32_pr(ti_SSE1);
    rf_SSE2       = gmx_cvtepi32_pr(ti_SSE2);
    rf_SSE3       = gmx_cvtepi32_pr(ti_SSE3);
#endif
    frac_SSE0     = gmx_sub_pr(rs_SSE0, rf_SSE0);
    frac_SSE1     = gmx_sub_pr(rs_SSE1, rf_SSE1);
    frac_SSE2     = gmx_sub_pr(rs_SSE2, rf_SSE2);
    frac_SSE3     = gmx_sub_pr(rs_SSE3, rf_SSE3);
    /* Load and interpolate table forces and possibly energies.
     * Force and energy can be combined in one table, stride 4: FDV0,
     * or stored in two separate tables with stride 1: F and V.
     * Currently single precision uses FDV0, double precision uses F and V.
     */
#ifndef CALC_ENERGIES
    load_table_f(tab_coul_F, ti_SSE0, ti0, ctab0_SSE0, ctab1_SSE0);
    load_table_f(tab_coul_F, ti_SSE1, ti1, ctab0_SSE1, ctab1_SSE1);
    load_table_f(tab_coul_F, ti_SSE2, ti2, ctab0_SSE2, ctab1_SSE2);
    load_table_f(tab_coul_F, ti_SSE3, ti3, ctab0_SSE3, ctab1_SSE3);
#else
#ifdef TAB_FDV0
    load_table_f_v(tab_coul_F, ti_SSE0, ti0, ctab0_SSE0, ctab1_SSE0, ctabv_SSE0);
    load_table_f_v(tab_coul_F, ti_SSE1, ti1, ctab0_SSE1, ctab1_SSE1, ctabv_SSE1);
    load_table_f_v(tab_coul_F, ti_SSE2, ti2, ctab0_SSE2, ctab1_SSE2, ctabv_SSE2);
    load_table_f_v(tab_coul_F, ti_SSE3, ti3, ctab0_SSE3, ctab1_SSE3, ctabv_SSE3);
#else
    load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE0, ti0, ctab0_SSE0, ctab1_SSE0, ctabv_SSE0);
    load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE1, ti1, ctab0_SSE1, ctab1_SSE1, ctabv_SSE1);
    load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE2, ti2, ctab0_SSE2, ctab1_SSE2, ctabv_SSE2);
    load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE3, ti3, ctab0_SSE3, ctab1_SSE3, ctabv_SSE3);
#endif
#endif
    fsub_SSE0     = gmx_add_pr(ctab0_SSE0, gmx_mul_pr(frac_SSE0, ctab1_SSE0));
    fsub_SSE1     = gmx_add_pr(ctab0_SSE1, gmx_mul_pr(frac_SSE1, ctab1_SSE1));
    fsub_SSE2     = gmx_add_pr(ctab0_SSE2, gmx_mul_pr(frac_SSE2, ctab1_SSE2));
    fsub_SSE3     = gmx_add_pr(ctab0_SSE3, gmx_mul_pr(frac_SSE3, ctab1_SSE3));
    frcoul_SSE0   = gmx_mul_pr(qq_SSE0, gmx_sub_pr(rinv_ex_SSE0, gmx_mul_pr(fsub_SSE0, r_SSE0)));
    frcoul_SSE1   = gmx_mul_pr(qq_SSE1, gmx_sub_pr(rinv_ex_SSE1, gmx_mul_pr(fsub_SSE1, r_SSE1)));
    frcoul_SSE2   = gmx_mul_pr(qq_SSE2, gmx_sub_pr(rinv_ex_SSE2, gmx_mul_pr(fsub_SSE2, r_SSE2)));
    frcoul_SSE3   = gmx_mul_pr(qq_SSE3, gmx_sub_pr(rinv_ex_SSE3, gmx_mul_pr(fsub_SSE3, r_SSE3)));

#ifdef CALC_ENERGIES
    vc_sub_SSE0   = gmx_add_pr(ctabv_SSE0, gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE, frac_SSE0), gmx_add_pr(ctab0_SSE0, fsub_SSE0)));
    vc_sub_SSE1   = gmx_add_pr(ctabv_SSE1, gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE, frac_SSE1), gmx_add_pr(ctab0_SSE1, fsub_SSE1)));
    vc_sub_SSE2   = gmx_add_pr(ctabv_SSE2, gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE, frac_SSE2), gmx_add_pr(ctab0_SSE2, fsub_SSE2)));
    vc_sub_SSE3   = gmx_add_pr(ctabv_SSE3, gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE, frac_SSE3), gmx_add_pr(ctab0_SSE3, fsub_SSE3)));
#endif

#endif /* CALC_COUL_TAB */
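    /* Note (editorial), scalar sketch of the table interpolation above,
     * with h the table spacing (invtsp = 1/h, mhalfsp = -h/2) and ctab1
     * the stored difference F[i+1] - F[i]:
     *     fsub   = F[i] + frac*(F[i+1] - F[i])
     *     vc_sub = V[i] - (h/2)*frac*(F[i] + fsub)
     * The energy follows from analytically integrating the linearly
     * interpolated force over the fractional bin.
     */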
#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
#ifndef NO_SHIFT_EWALD
    /* Add Ewald potential shift to vc_sub for convenience */
#ifdef CHECK_EXCLS
    vc_sub_SSE0   = gmx_add_pr(vc_sub_SSE0, gmx_and_pr(sh_ewald_SSE, int_SSE0));
    vc_sub_SSE1   = gmx_add_pr(vc_sub_SSE1, gmx_and_pr(sh_ewald_SSE, int_SSE1));
    vc_sub_SSE2   = gmx_add_pr(vc_sub_SSE2, gmx_and_pr(sh_ewald_SSE, int_SSE2));
    vc_sub_SSE3   = gmx_add_pr(vc_sub_SSE3, gmx_and_pr(sh_ewald_SSE, int_SSE3));
#else
    vc_sub_SSE0   = gmx_add_pr(vc_sub_SSE0, sh_ewald_SSE);
    vc_sub_SSE1   = gmx_add_pr(vc_sub_SSE1, sh_ewald_SSE);
    vc_sub_SSE2   = gmx_add_pr(vc_sub_SSE2, sh_ewald_SSE);
    vc_sub_SSE3   = gmx_add_pr(vc_sub_SSE3, sh_ewald_SSE);
#endif
#endif

    vcoul_SSE0    = gmx_mul_pr(qq_SSE0, gmx_sub_pr(rinv_ex_SSE0, vc_sub_SSE0));
    vcoul_SSE1    = gmx_mul_pr(qq_SSE1, gmx_sub_pr(rinv_ex_SSE1, vc_sub_SSE1));
    vcoul_SSE2    = gmx_mul_pr(qq_SSE2, gmx_sub_pr(rinv_ex_SSE2, vc_sub_SSE2));
    vcoul_SSE3    = gmx_mul_pr(qq_SSE3, gmx_sub_pr(rinv_ex_SSE3, vc_sub_SSE3));
#endif
#ifdef CALC_ENERGIES
    /* Mask energy for cut-off and diagonal */
    vcoul_SSE0    = gmx_and_pr(vcoul_SSE0, wco_SSE0);
    vcoul_SSE1    = gmx_and_pr(vcoul_SSE1, wco_SSE1);
    vcoul_SSE2    = gmx_and_pr(vcoul_SSE2, wco_SSE2);
    vcoul_SSE3    = gmx_and_pr(vcoul_SSE3, wco_SSE3);
#endif

#endif /* CALC_COULOMB */
#ifdef CALC_LJ
    /* Lennard-Jones interaction */

#ifdef VDW_CUTOFF_CHECK
    wco_vdw_SSE0  = gmx_cmplt_pr(rsq_SSE0, rcvdw2_SSE);
    wco_vdw_SSE1  = gmx_cmplt_pr(rsq_SSE1, rcvdw2_SSE);
#ifndef HALF_LJ
    wco_vdw_SSE2  = gmx_cmplt_pr(rsq_SSE2, rcvdw2_SSE);
    wco_vdw_SSE3  = gmx_cmplt_pr(rsq_SSE3, rcvdw2_SSE);
#endif
#else
    /* Same cut-off for Coulomb and VdW, reuse the registers */
#define wco_vdw_SSE0    wco_SSE0
#define wco_vdw_SSE1    wco_SSE1
#define wco_vdw_SSE2    wco_SSE2
#define wco_vdw_SSE3    wco_SSE3
#endif
#ifndef LJ_COMB_LB
    rinvsix_SSE0  = gmx_mul_pr(rinvsq_SSE0, gmx_mul_pr(rinvsq_SSE0, rinvsq_SSE0));
    rinvsix_SSE1  = gmx_mul_pr(rinvsq_SSE1, gmx_mul_pr(rinvsq_SSE1, rinvsq_SSE1));
#ifdef EXCL_FORCES
    rinvsix_SSE0  = gmx_and_pr(rinvsix_SSE0, int_SSE0);
    rinvsix_SSE1  = gmx_and_pr(rinvsix_SSE1, int_SSE1);
#endif
#ifndef HALF_LJ
    rinvsix_SSE2  = gmx_mul_pr(rinvsq_SSE2, gmx_mul_pr(rinvsq_SSE2, rinvsq_SSE2));
    rinvsix_SSE3  = gmx_mul_pr(rinvsq_SSE3, gmx_mul_pr(rinvsq_SSE3, rinvsq_SSE3));
#ifdef EXCL_FORCES
    rinvsix_SSE2  = gmx_and_pr(rinvsix_SSE2, int_SSE2);
    rinvsix_SSE3  = gmx_and_pr(rinvsix_SSE3, int_SSE3);
#endif
#endif
#ifdef VDW_CUTOFF_CHECK
    rinvsix_SSE0  = gmx_and_pr(rinvsix_SSE0, wco_vdw_SSE0);
    rinvsix_SSE1  = gmx_and_pr(rinvsix_SSE1, wco_vdw_SSE1);
#ifndef HALF_LJ
    rinvsix_SSE2  = gmx_and_pr(rinvsix_SSE2, wco_vdw_SSE2);
    rinvsix_SSE3  = gmx_and_pr(rinvsix_SSE3, wco_vdw_SSE3);
#endif
#endif
    FrLJ6_SSE0    = gmx_mul_pr(c6_SSE0, rinvsix_SSE0);
    FrLJ6_SSE1    = gmx_mul_pr(c6_SSE1, rinvsix_SSE1);
#ifndef HALF_LJ
    FrLJ6_SSE2    = gmx_mul_pr(c6_SSE2, rinvsix_SSE2);
    FrLJ6_SSE3    = gmx_mul_pr(c6_SSE3, rinvsix_SSE3);
#endif
    FrLJ12_SSE0   = gmx_mul_pr(c12_SSE0, gmx_mul_pr(rinvsix_SSE0, rinvsix_SSE0));
    FrLJ12_SSE1   = gmx_mul_pr(c12_SSE1, gmx_mul_pr(rinvsix_SSE1, rinvsix_SSE1));
#ifndef HALF_LJ
    FrLJ12_SSE2   = gmx_mul_pr(c12_SSE2, gmx_mul_pr(rinvsix_SSE2, rinvsix_SSE2));
    FrLJ12_SSE3   = gmx_mul_pr(c12_SSE3, gmx_mul_pr(rinvsix_SSE3, rinvsix_SSE3));
#endif
#endif /* not LJ_COMB_LB */
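    /* Note (editorial): FrLJ6 = C6*r^-6 and FrLJ12 = C12*r^-12. Judging
     * by the 1/6 and 1/12 factors applied to the energies below, the
     * stored C6/C12 appear to include the factors 6 and 12, so
     * FrLJ12 - FrLJ6 is the LJ force times r and multiplying by rinvsq
     * in fscal yields force/r.
     */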
#ifdef LJ_COMB_LB
    sir_SSE0      = gmx_mul_pr(sig_SSE0, rinv_SSE0);
    sir_SSE1      = gmx_mul_pr(sig_SSE1, rinv_SSE1);
#ifndef HALF_LJ
    sir_SSE2      = gmx_mul_pr(sig_SSE2, rinv_SSE2);
    sir_SSE3      = gmx_mul_pr(sig_SSE3, rinv_SSE3);
#endif
    sir2_SSE0     = gmx_mul_pr(sir_SSE0, sir_SSE0);
    sir2_SSE1     = gmx_mul_pr(sir_SSE1, sir_SSE1);
#ifndef HALF_LJ
    sir2_SSE2     = gmx_mul_pr(sir_SSE2, sir_SSE2);
    sir2_SSE3     = gmx_mul_pr(sir_SSE3, sir_SSE3);
#endif
    sir6_SSE0     = gmx_mul_pr(sir2_SSE0, gmx_mul_pr(sir2_SSE0, sir2_SSE0));
    sir6_SSE1     = gmx_mul_pr(sir2_SSE1, gmx_mul_pr(sir2_SSE1, sir2_SSE1));
#ifdef EXCL_FORCES
    sir6_SSE0     = gmx_and_pr(sir6_SSE0, int_SSE0);
    sir6_SSE1     = gmx_and_pr(sir6_SSE1, int_SSE1);
#endif
#ifndef HALF_LJ
    sir6_SSE2     = gmx_mul_pr(sir2_SSE2, gmx_mul_pr(sir2_SSE2, sir2_SSE2));
    sir6_SSE3     = gmx_mul_pr(sir2_SSE3, gmx_mul_pr(sir2_SSE3, sir2_SSE3));
#ifdef EXCL_FORCES
    sir6_SSE2     = gmx_and_pr(sir6_SSE2, int_SSE2);
    sir6_SSE3     = gmx_and_pr(sir6_SSE3, int_SSE3);
#endif
#endif
#ifdef VDW_CUTOFF_CHECK
    sir6_SSE0     = gmx_and_pr(sir6_SSE0, wco_vdw_SSE0);
    sir6_SSE1     = gmx_and_pr(sir6_SSE1, wco_vdw_SSE1);
#ifndef HALF_LJ
    sir6_SSE2     = gmx_and_pr(sir6_SSE2, wco_vdw_SSE2);
    sir6_SSE3     = gmx_and_pr(sir6_SSE3, wco_vdw_SSE3);
#endif
#endif
    FrLJ6_SSE0    = gmx_mul_pr(eps_SSE0, sir6_SSE0);
    FrLJ6_SSE1    = gmx_mul_pr(eps_SSE1, sir6_SSE1);
#ifndef HALF_LJ
    FrLJ6_SSE2    = gmx_mul_pr(eps_SSE2, sir6_SSE2);
    FrLJ6_SSE3    = gmx_mul_pr(eps_SSE3, sir6_SSE3);
#endif
    FrLJ12_SSE0   = gmx_mul_pr(FrLJ6_SSE0, sir6_SSE0);
    FrLJ12_SSE1   = gmx_mul_pr(FrLJ6_SSE1, sir6_SSE1);
#ifndef HALF_LJ
    FrLJ12_SSE2   = gmx_mul_pr(FrLJ6_SSE2, sir6_SSE2);
    FrLJ12_SSE3   = gmx_mul_pr(FrLJ6_SSE3, sir6_SSE3);
#endif
#if defined CALC_ENERGIES
    /* We need C6 and C12 to calculate the LJ potential shift */
    sig2_SSE0     = gmx_mul_pr(sig_SSE0, sig_SSE0);
    sig2_SSE1     = gmx_mul_pr(sig_SSE1, sig_SSE1);
#ifndef HALF_LJ
    sig2_SSE2     = gmx_mul_pr(sig_SSE2, sig_SSE2);
    sig2_SSE3     = gmx_mul_pr(sig_SSE3, sig_SSE3);
#endif
    sig6_SSE0     = gmx_mul_pr(sig2_SSE0, gmx_mul_pr(sig2_SSE0, sig2_SSE0));
    sig6_SSE1     = gmx_mul_pr(sig2_SSE1, gmx_mul_pr(sig2_SSE1, sig2_SSE1));
#ifndef HALF_LJ
    sig6_SSE2     = gmx_mul_pr(sig2_SSE2, gmx_mul_pr(sig2_SSE2, sig2_SSE2));
    sig6_SSE3     = gmx_mul_pr(sig2_SSE3, gmx_mul_pr(sig2_SSE3, sig2_SSE3));
#endif
    c6_SSE0       = gmx_mul_pr(eps_SSE0, sig6_SSE0);
    c6_SSE1       = gmx_mul_pr(eps_SSE1, sig6_SSE1);
#ifndef HALF_LJ
    c6_SSE2       = gmx_mul_pr(eps_SSE2, sig6_SSE2);
    c6_SSE3       = gmx_mul_pr(eps_SSE3, sig6_SSE3);
#endif
    c12_SSE0      = gmx_mul_pr(c6_SSE0, sig6_SSE0);
    c12_SSE1      = gmx_mul_pr(c6_SSE1, sig6_SSE1);
#ifndef HALF_LJ
    c12_SSE2      = gmx_mul_pr(c6_SSE2, sig6_SSE2);
    c12_SSE3      = gmx_mul_pr(c6_SSE3, sig6_SSE3);
#endif
#endif /* CALC_ENERGIES */
#endif /* LJ_COMB_LB */

#endif /* CALC_LJ */
#ifdef CALC_ENERGIES

#ifdef ENERGY_GROUPS
    /* Extract the group pair index per j pair.
     * Energy groups are stored per i-cluster, so things get
     * complicated when the i- and j-cluster sizes don't match.
     */
    {
        int egps_j;
#if UNROLLJ == 2
        egps_j    = nbat->energrp[cj>>1];
        egp_jj[0] = ((egps_j >> ((cj & 1)*egps_jshift)) & egps_jmask)*egps_jstride;
#else
        /* We assume UNROLLI <= UNROLLJ */
        int jdi, jj;
        for (jdi = 0; jdi < UNROLLJ/UNROLLI; jdi++)
        {
            egps_j = nbat->energrp[cj*(UNROLLJ/UNROLLI)+jdi];
            for (jj = 0; jj < (UNROLLI/2); jj++)
            {
                egp_jj[jdi*(UNROLLI/2)+jj] = ((egps_j >> (jj*egps_jshift)) & egps_jmask)*egps_jstride;
            }
        }
#endif
    }
#endif
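    /* Note (editorial): nbat->energrp packs the energy-group indices of a
     * cluster into one int, egps_jshift bits per entry with two atoms per
     * entry (cf. the egp_jj declaration); the extraction above turns each
     * entry into a ready-to-use offset (scaled by egps_jstride) into the
     * per-group energy buffers vctp/vvdwtp used below.
     */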
#ifdef CALC_COULOMB
#ifndef ENERGY_GROUPS
    vctotSSE      = gmx_add_pr(vctotSSE, gmx_sum4_pr(vcoul_SSE0, vcoul_SSE1, vcoul_SSE2, vcoul_SSE3));
#else
    add_ener_grp(vcoul_SSE0, vctp[0], egp_jj);
    add_ener_grp(vcoul_SSE1, vctp[1], egp_jj);
    add_ener_grp(vcoul_SSE2, vctp[2], egp_jj);
    add_ener_grp(vcoul_SSE3, vctp[3], egp_jj);
#endif
#endif
#ifdef CALC_LJ
    /* Calculate the LJ energies */
    VLJ6_SSE0     = gmx_mul_pr(sixthSSE, gmx_sub_pr(FrLJ6_SSE0, gmx_mul_pr(c6_SSE0, sh_invrc6_SSE)));
    VLJ6_SSE1     = gmx_mul_pr(sixthSSE, gmx_sub_pr(FrLJ6_SSE1, gmx_mul_pr(c6_SSE1, sh_invrc6_SSE)));
#ifndef HALF_LJ
    VLJ6_SSE2     = gmx_mul_pr(sixthSSE, gmx_sub_pr(FrLJ6_SSE2, gmx_mul_pr(c6_SSE2, sh_invrc6_SSE)));
    VLJ6_SSE3     = gmx_mul_pr(sixthSSE, gmx_sub_pr(FrLJ6_SSE3, gmx_mul_pr(c6_SSE3, sh_invrc6_SSE)));
#endif
    VLJ12_SSE0    = gmx_mul_pr(twelvethSSE, gmx_sub_pr(FrLJ12_SSE0, gmx_mul_pr(c12_SSE0, sh_invrc12_SSE)));
    VLJ12_SSE1    = gmx_mul_pr(twelvethSSE, gmx_sub_pr(FrLJ12_SSE1, gmx_mul_pr(c12_SSE1, sh_invrc12_SSE)));
#ifndef HALF_LJ
    VLJ12_SSE2    = gmx_mul_pr(twelvethSSE, gmx_sub_pr(FrLJ12_SSE2, gmx_mul_pr(c12_SSE2, sh_invrc12_SSE)));
    VLJ12_SSE3    = gmx_mul_pr(twelvethSSE, gmx_sub_pr(FrLJ12_SSE3, gmx_mul_pr(c12_SSE3, sh_invrc12_SSE)));
#endif

    VLJ_SSE0      = gmx_sub_pr(VLJ12_SSE0, VLJ6_SSE0);
    VLJ_SSE1      = gmx_sub_pr(VLJ12_SSE1, VLJ6_SSE1);
#ifndef HALF_LJ
    VLJ_SSE2      = gmx_sub_pr(VLJ12_SSE2, VLJ6_SSE2);
    VLJ_SSE3      = gmx_sub_pr(VLJ12_SSE3, VLJ6_SSE3);
#endif
    /* The potential shift should be removed for pairs beyond cut-off */
    VLJ_SSE0      = gmx_and_pr(VLJ_SSE0, wco_vdw_SSE0);
    VLJ_SSE1      = gmx_and_pr(VLJ_SSE1, wco_vdw_SSE1);
#ifndef HALF_LJ
    VLJ_SSE2      = gmx_and_pr(VLJ_SSE2, wco_vdw_SSE2);
    VLJ_SSE3      = gmx_and_pr(VLJ_SSE3, wco_vdw_SSE3);
#endif
#ifdef CHECK_EXCLS
    /* The potential shift should be removed for excluded pairs */
    VLJ_SSE0      = gmx_and_pr(VLJ_SSE0, int_SSE0);
    VLJ_SSE1      = gmx_and_pr(VLJ_SSE1, int_SSE1);
#ifndef HALF_LJ
    VLJ_SSE2      = gmx_and_pr(VLJ_SSE2, int_SSE2);
    VLJ_SSE3      = gmx_and_pr(VLJ_SSE3, int_SSE3);
#endif
#endif
#ifndef ENERGY_GROUPS
    VvdwtotSSE    = gmx_add_pr(VvdwtotSSE,
#ifndef HALF_LJ
                               gmx_sum4_pr(VLJ_SSE0, VLJ_SSE1, VLJ_SSE2, VLJ_SSE3)
#else
                               gmx_add_pr(VLJ_SSE0, VLJ_SSE1)
#endif
                               );
#else
    add_ener_grp(VLJ_SSE0, vvdwtp[0], egp_jj);
    add_ener_grp(VLJ_SSE1, vvdwtp[1], egp_jj);
#ifndef HALF_LJ
    add_ener_grp(VLJ_SSE2, vvdwtp[2], egp_jj);
    add_ener_grp(VLJ_SSE3, vvdwtp[3], egp_jj);
#endif
#endif
#endif /* CALC_LJ */
#endif /* CALC_ENERGIES */
#ifdef CALC_LJ
    fscal_SSE0    = gmx_mul_pr(rinvsq_SSE0,
#ifdef CALC_COULOMB
                               gmx_add_pr(frcoul_SSE0,
#else
                               (
#endif
                                          gmx_sub_pr(FrLJ12_SSE0, FrLJ6_SSE0)));
    fscal_SSE1    = gmx_mul_pr(rinvsq_SSE1,
#ifdef CALC_COULOMB
                               gmx_add_pr(frcoul_SSE1,
#else
                               (
#endif
                                          gmx_sub_pr(FrLJ12_SSE1, FrLJ6_SSE1)));
#else
    fscal_SSE0    = gmx_mul_pr(rinvsq_SSE0, frcoul_SSE0);
    fscal_SSE1    = gmx_mul_pr(rinvsq_SSE1, frcoul_SSE1);
#endif /* CALC_LJ */
#if defined CALC_LJ && !defined HALF_LJ
    fscal_SSE2    = gmx_mul_pr(rinvsq_SSE2,
#ifdef CALC_COULOMB
                               gmx_add_pr(frcoul_SSE2,
#else
                               (
#endif
                                          gmx_sub_pr(FrLJ12_SSE2, FrLJ6_SSE2)));
    fscal_SSE3    = gmx_mul_pr(rinvsq_SSE3,
#ifdef CALC_COULOMB
                               gmx_add_pr(frcoul_SSE3,
#else
                               (
#endif
                                          gmx_sub_pr(FrLJ12_SSE3, FrLJ6_SSE3)));
#else
    /* Atoms 2 and 3 don't have LJ, so only add Coulomb forces */
    fscal_SSE2    = gmx_mul_pr(rinvsq_SSE2, frcoul_SSE2);
    fscal_SSE3    = gmx_mul_pr(rinvsq_SSE3, frcoul_SSE3);
#endif
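    /* Note (editorial): fscal is force/r, so multiplying by the
     * coordinate differences below gives Cartesian components directly:
     * (F/r)*dx = F*(dx/r) = F_x.
     */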
    /* Calculate temporary vectorial force */
    tx_SSE0       = gmx_mul_pr(fscal_SSE0, dx_SSE0);
    tx_SSE1       = gmx_mul_pr(fscal_SSE1, dx_SSE1);
    tx_SSE2       = gmx_mul_pr(fscal_SSE2, dx_SSE2);
    tx_SSE3       = gmx_mul_pr(fscal_SSE3, dx_SSE3);
    ty_SSE0       = gmx_mul_pr(fscal_SSE0, dy_SSE0);
    ty_SSE1       = gmx_mul_pr(fscal_SSE1, dy_SSE1);
    ty_SSE2       = gmx_mul_pr(fscal_SSE2, dy_SSE2);
    ty_SSE3       = gmx_mul_pr(fscal_SSE3, dy_SSE3);
    tz_SSE0       = gmx_mul_pr(fscal_SSE0, dz_SSE0);
    tz_SSE1       = gmx_mul_pr(fscal_SSE1, dz_SSE1);
    tz_SSE2       = gmx_mul_pr(fscal_SSE2, dz_SSE2);
    tz_SSE3       = gmx_mul_pr(fscal_SSE3, dz_SSE3);

    /* Increment i atom force */
    fix_SSE0      = gmx_add_pr(fix_SSE0, tx_SSE0);
    fix_SSE1      = gmx_add_pr(fix_SSE1, tx_SSE1);
    fix_SSE2      = gmx_add_pr(fix_SSE2, tx_SSE2);
    fix_SSE3      = gmx_add_pr(fix_SSE3, tx_SSE3);
    fiy_SSE0      = gmx_add_pr(fiy_SSE0, ty_SSE0);
    fiy_SSE1      = gmx_add_pr(fiy_SSE1, ty_SSE1);
    fiy_SSE2      = gmx_add_pr(fiy_SSE2, ty_SSE2);
    fiy_SSE3      = gmx_add_pr(fiy_SSE3, ty_SSE3);
    fiz_SSE0      = gmx_add_pr(fiz_SSE0, tz_SSE0);
    fiz_SSE1      = gmx_add_pr(fiz_SSE1, tz_SSE1);
    fiz_SSE2      = gmx_add_pr(fiz_SSE2, tz_SSE2);
    fiz_SSE3      = gmx_add_pr(fiz_SSE3, tz_SSE3);
    /* Decrement j atom force */
    gmx_store_pr(f+ajx,
                 gmx_sub_pr( gmx_load_pr(f+ajx), gmx_sum4_pr(tx_SSE0, tx_SSE1, tx_SSE2, tx_SSE3) ));
    gmx_store_pr(f+ajy,
                 gmx_sub_pr( gmx_load_pr(f+ajy), gmx_sum4_pr(ty_SSE0, ty_SSE1, ty_SSE2, ty_SSE3) ));
    gmx_store_pr(f+ajz,
                 gmx_sub_pr( gmx_load_pr(f+ajz), gmx_sum4_pr(tz_SSE0, tz_SSE1, tz_SSE2, tz_SSE3) ));
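    /* Note (editorial): Newton's third law. The contributions of the
     * four i-atoms are summed per j-lane with gmx_sum4_pr and subtracted
     * from the j forces in a single load/modify/store per dimension.
     */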
}

#undef rinv_ex_SSE0
#undef rinv_ex_SSE1
#undef rinv_ex_SSE2
#undef rinv_ex_SSE3

#undef wco_vdw_SSE0
#undef wco_vdw_SSE1
#undef wco_vdw_SSE2
#undef wco_vdw_SSE3

#undef CUTOFF_BLENDV
#undef EXCL_FORCES