/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2009, The GROMACS Development Team
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/* This is the innermost loop contents for the 4 x N atom SIMD kernel.
 * This flavor of the kernel duplicates the data for N j-particles in
 * 2xN wide SIMD registers to operate on 2 i-particles at once.
 * This leads to 4/2=2 sets of most instructions. Therefore we call
 * this kernel 2x(N+N) = 2xnn.
 *
 * This 2xnn kernel is basically the 4xn equivalent with half the registers
 * and instructions removed.
 *
 * An alternative would be to load two different clusters of N j-particles
 * into SIMD registers, giving a 4x(N+N) kernel. This doubles the amount
 * of instructions, which could lead to better scheduling. But we actually
 * observed worse scheduling for the AVX-256 4x8 normal analytical PME
 * kernel, which has a lower pair throughput than 2x(4+4) with gcc 4.7.
 * It could be worth trying this option, but it takes some more effort.
 */
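/* For illustration only: assuming AVX-256 in single precision (N=4,
 * 8-wide registers), the j-coordinates are loaded once and duplicated
 * into both register halves, while each half holds a different
 * broadcast i-particle:
 *
 *   jxSSE   = [ xj0 xj1 xj2 xj3 | xj0 xj1 xj2 xj3 ]
 *   ix_SSE0 = [ xi0 xi0 xi0 xi0 | xi1 xi1 xi1 xi1 ]
 *   ix_SSE2 = [ xi2 xi2 xi2 xi2 | xi3 xi3 xi3 xi3 ]
 *
 * A single subtract then yields dx for two i-particles against all four
 * j-particles, which is why only the _SSE0 and _SSE2 variable sets
 * appear below: they cover i-particle pairs 0,1 and 2,3.
 */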
/* When calculating RF or Ewald interactions we calculate the electrostatic
 * forces on excluded atom pairs here in the non-bonded loops.
 * But when energies and/or the virial are required, we calculate them
 * separately, as it is then easier to separate the energy and virial.
 */
#if defined CHECK_EXCLS && defined CALC_COULOMB
#define EXCL_FORCES
#endif
/* Without exclusions and energies we only need to mask the cut-off,
 * this can be faster with blendv (only available with SSE4.1 and later).
 */
#if !(defined CHECK_EXCLS || defined CALC_ENERGIES) && defined GMX_X86_SSE4_1 && !defined COUNT_PAIRS
/* With RF and tabulated Coulomb we replace cmp+and with sub+blendv.
 * With gcc this is slower, except for RF on Sandy Bridge.
 * Tested with gcc 4.6.2, 4.6.3 and 4.7.1.
 */
#if (defined CALC_COUL_RF || defined CALC_COUL_TAB) && (!defined __GNUC__ || (defined CALC_COUL_RF && defined GMX_X86_AVX_256))
#define CUTOFF_BLENDV
#endif
/* With analytical Ewald we replace cmp+and+and with sub+blendv+blendv.
 * This is only faster with icc on Sandy Bridge (PS kernel slower than gcc 4.7).
 */
#if defined CALC_COUL_EWALD && defined __INTEL_COMPILER && defined GMX_X86_AVX_256
#define CUTOFF_BLENDV
#endif
#endif
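/* The two cut-off masking idioms compared above, in scalar form
 * (illustration only, not compiled):
 *
 *   cmp+and:     wco  = (rsq < rc2) ? all_ones : 0;  rinv = rinv & wco;
 *   sub+blendv:  rinv = blendv(rinv, 0.0, rc2 - rsq);
 *
 * blendv selects its second argument wherever the sign bit of the third
 * is set; rc2 - rsq is negative exactly for pairs beyond the cut-off,
 * so the explicit compare and mask instructions can be dropped.
 */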
{
    int        cj, aj, ajx, ajy, ajz;

#ifdef ENERGY_GROUPS
    /* Energy group indices for two atoms packed into one int */
    int        egp_jj[UNROLLJ/2];
#endif

#ifdef CHECK_EXCLS
    /* Interaction (non-exclusion) mask of all 1's or 0's */
    gmx_mm_pr  int_SSE0;
    gmx_mm_pr  int_SSE2;
#endif

    gmx_mm_pr  jxSSE, jySSE, jzSSE;
    gmx_mm_pr  dx_SSE0, dy_SSE0, dz_SSE0;
    gmx_mm_pr  dx_SSE2, dy_SSE2, dz_SSE2;
    gmx_mm_pr  tx_SSE0, ty_SSE0, tz_SSE0;
    gmx_mm_pr  tx_SSE2, ty_SSE2, tz_SSE2;
    gmx_mm_pr  rsq_SSE0, rinv_SSE0, rinvsq_SSE0;
    gmx_mm_pr  rsq_SSE2, rinv_SSE2, rinvsq_SSE2;
#ifndef CUTOFF_BLENDV
    /* wco: within cut-off, mask of all 1's or 0's */
    gmx_mm_pr  wco_SSE0;
    gmx_mm_pr  wco_SSE2;
#endif
#ifdef VDW_CUTOFF_CHECK
    gmx_mm_pr  wco_vdw_SSE0;
#ifndef HALF_LJ
    gmx_mm_pr  wco_vdw_SSE2;
#endif
#endif
#ifdef EXCL_FORCES
    /* 1/r masked with the interaction mask */
    gmx_mm_pr  rinv_ex_SSE0;
    gmx_mm_pr  rinv_ex_SSE2;
#endif
#ifdef CALC_COULOMB
    /* Coulomb charges */
    gmx_mm_pr  jq_SSE;
    gmx_mm_pr  qq_SSE0;
    gmx_mm_pr  qq_SSE2;

#ifdef CALC_COUL_TAB
    /* The force (PME mesh force) we need to subtract from 1/r^2 */
    gmx_mm_pr  fsub_SSE0;
    gmx_mm_pr  fsub_SSE2;
#endif
#ifdef CALC_COUL_EWALD
    gmx_mm_pr  brsq_SSE0, brsq_SSE2;
    gmx_mm_pr  ewcorr_SSE0, ewcorr_SSE2;
#endif
    /* frcoul = (1/r - fsub)*r */
    gmx_mm_pr  frcoul_SSE0;
    gmx_mm_pr  frcoul_SSE2;
#ifdef CALC_COUL_TAB
    /* For tables: r, rs=r/sp, rf=floor(rs), frac=rs-rf */
    gmx_mm_pr  r_SSE0, rs_SSE0, rf_SSE0, frac_SSE0;
    gmx_mm_pr  r_SSE2, rs_SSE2, rf_SSE2, frac_SSE2;
    /* Table index: rs truncated to an int */
#if !(defined GMX_MM256_HERE && defined GMX_DOUBLE)
    gmx_epi32  ti_SSE0, ti_SSE2;
#else
    __m128i    ti_SSE0, ti_SSE2;
#endif
    /* Linear force table values */
    gmx_mm_pr  ctab0_SSE0, ctab1_SSE0;
    gmx_mm_pr  ctab0_SSE2, ctab1_SSE2;
#ifdef CALC_ENERGIES
    /* Quadratic energy table value */
    gmx_mm_pr  ctabv_SSE0;
    gmx_mm_pr  ctabv_SSE2;
#endif
#endif /* CALC_COUL_TAB */
#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
    /* The potential (PME mesh) we need to subtract from 1/r */
    gmx_mm_pr  vc_sub_SSE0;
    gmx_mm_pr  vc_sub_SSE2;
#endif
#ifdef CALC_ENERGIES
    /* Electrostatic potential */
    gmx_mm_pr  vcoul_SSE0;
    gmx_mm_pr  vcoul_SSE2;
#endif
#endif /* CALC_COULOMB */
    /* The force times 1/r */
    gmx_mm_pr  fscal_SSE0;
    gmx_mm_pr  fscal_SSE2;
#ifdef CALC_LJ
#ifdef LJ_COMB_LB
    /* LJ sigma_j/2 and sqrt(epsilon_j) */
    gmx_mm_pr  hsig_j_SSE, seps_j_SSE;
    /* LJ sigma_ij and epsilon_ij */
    gmx_mm_pr  sig_SSE0, eps_SSE0;
#ifndef HALF_LJ
    gmx_mm_pr  sig_SSE2, eps_SSE2;
#endif
#ifdef CALC_ENERGIES
    gmx_mm_pr  sig2_SSE0, sig6_SSE0;
#ifndef HALF_LJ
    gmx_mm_pr  sig2_SSE2, sig6_SSE2;
#endif
#endif
#endif /* LJ_COMB_LB */
#ifdef LJ_COMB_GEOM
    gmx_mm_pr  c6s_j_SSE, c12s_j_SSE;
#endif

#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
    /* Index for loading LJ parameters, complicated when interleaving */
    int        aj2;
#endif

#ifndef FIX_LJ_C
    /* LJ C6 and C12 parameters, used with geometric comb. rule */
    gmx_mm_pr  c6_SSE0, c12_SSE0;
#ifndef HALF_LJ
    gmx_mm_pr  c6_SSE2, c12_SSE2;
#endif
#endif
    /* Intermediate variables for LJ calculation */
#ifndef LJ_COMB_LB
    gmx_mm_pr  rinvsix_SSE0;
#ifndef HALF_LJ
    gmx_mm_pr  rinvsix_SSE2;
#endif
#endif
#ifdef LJ_COMB_LB
    gmx_mm_pr  sir_SSE0, sir2_SSE0, sir6_SSE0;
#ifndef HALF_LJ
    gmx_mm_pr  sir_SSE2, sir2_SSE2, sir6_SSE2;
#endif
#endif

    gmx_mm_pr  FrLJ6_SSE0, FrLJ12_SSE0;
#ifndef HALF_LJ
    gmx_mm_pr  FrLJ6_SSE2, FrLJ12_SSE2;
#endif
#ifdef CALC_ENERGIES
    gmx_mm_pr  VLJ6_SSE0, VLJ12_SSE0, VLJ_SSE0;
#ifndef HALF_LJ
    gmx_mm_pr  VLJ6_SSE2, VLJ12_SSE2, VLJ_SSE2;
#endif
#endif
#endif /* CALC_LJ */
    /* j-cluster index */
    cj  = l_cj[cjind].cj;

    /* Atom indices (of the first atom in the cluster) */
    aj  = cj*UNROLLJ;
#if defined CALC_LJ && (defined LJ_COMB_GEOM || defined LJ_COMB_LB)
#if UNROLLJ == STRIDE
    aj2 = aj*2;
#else
    aj2 = (cj>>1)*2*STRIDE + (cj & 1)*UNROLLJ;
#endif
#endif
#if UNROLLJ == STRIDE
    ajx = aj*DIM;
#else
    ajx = (cj>>1)*DIM*STRIDE + (cj & 1)*UNROLLJ;
#endif
    ajy = ajx + STRIDE;
    ajz = ajy + STRIDE;
#ifdef CHECK_EXCLS
    {
        /* Load integer interaction mask */
        /* With AVX there are no integer operations, so cast to real */
        gmx_mm_pr mask_pr = gmx_mm_castsi256_pr(_mm256_set1_epi32(l_cj[cjind].excl));
        /* Intel Compiler version 12.1.3 20120130 is buggy: use cast.
         * With gcc we don't need the cast, but it's faster.
         */
#define cast_cvt(x)  _mm256_cvtepi32_ps(_mm256_castps_si256(x))
        int_SSE0 = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr, mask0)), zero_SSE);
        int_SSE2 = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr, mask2)), zero_SSE);
#undef cast_cvt
    }
#endif
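    /* Sketch of what the mask load above computes, per lane (illustration
     * only): l_cj[cjind].excl holds one pair-interaction bit per i,j pair.
     * The broadcast puts the whole bit field in every lane; ANDing with
     * the per-lane bit patterns mask0/mask2 (set up outside this loop)
     * isolates each lane's bit. The int->float convert turns a non-zero
     * lane into a non-zero float, so cmpneq against zero yields the
     * desired all-1's/all-0's interaction mask without integer SIMD.
     */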
    /* load j atom coordinates */
    jxSSE = gmx_loaddh_pr(x+ajx);
    jySSE = gmx_loaddh_pr(x+ajy);
    jzSSE = gmx_loaddh_pr(x+ajz);
    /* Calculate distance */
    dx_SSE0 = gmx_sub_pr(ix_SSE0, jxSSE);
    dy_SSE0 = gmx_sub_pr(iy_SSE0, jySSE);
    dz_SSE0 = gmx_sub_pr(iz_SSE0, jzSSE);
    dx_SSE2 = gmx_sub_pr(ix_SSE2, jxSSE);
    dy_SSE2 = gmx_sub_pr(iy_SSE2, jySSE);
    dz_SSE2 = gmx_sub_pr(iz_SSE2, jzSSE);
    /* rsq = dx*dx+dy*dy+dz*dz */
    rsq_SSE0 = gmx_calc_rsq_pr(dx_SSE0, dy_SSE0, dz_SSE0);
    rsq_SSE2 = gmx_calc_rsq_pr(dx_SSE2, dy_SSE2, dz_SSE2);
#ifndef CUTOFF_BLENDV
    wco_SSE0 = gmx_cmplt_pr(rsq_SSE0, rc2_SSE);
    wco_SSE2 = gmx_cmplt_pr(rsq_SSE2, rc2_SSE);
#endif
#ifdef CHECK_EXCLS
#ifdef EXCL_FORCES
    /* Only remove the (sub-)diagonal to avoid double counting */
#if UNROLLJ == UNROLLI
    if (cj == ci_sh)
    {
        wco_SSE0 = gmx_and_pr(wco_SSE0, diag_SSE0);
        wco_SSE2 = gmx_and_pr(wco_SSE2, diag_SSE2);
    }
#else
#error "only UNROLLJ == UNROLLI currently supported in the joined kernels"
#endif
#else /* EXCL_FORCES */
    /* Remove all excluded atom pairs from the list */
    wco_SSE0 = gmx_and_pr(wco_SSE0, int_SSE0);
    wco_SSE2 = gmx_and_pr(wco_SSE2, int_SSE2);
#endif
#endif
#ifdef COUNT_PAIRS
    {
        int  i, j;
        real tmp[2*UNROLLJ];

        /* In this 2xnn kernel each wco register holds the results
         * for two i-atoms, so loop over the i-atoms in pairs.
         */
        for (i = 0; i < UNROLLI; i += 2)
        {
            gmx_storeu_pr(tmp, i == 0 ? wco_SSE0 : wco_SSE2);
            for (j = 0; j < 2*UNROLLJ; j++)
            {
                if (!(tmp[j] == 0))
                {
                    npair++;
                }
            }
        }
    }
#endif
#ifdef CHECK_EXCLS
    /* For excluded pairs add a small number to avoid r^-6 = NaN */
    rsq_SSE0 = gmx_add_pr(rsq_SSE0, gmx_andnot_pr(int_SSE0, avoid_sing_SSE));
    rsq_SSE2 = gmx_add_pr(rsq_SSE2, gmx_andnot_pr(int_SSE2, avoid_sing_SSE));
#endif
    rinv_SSE0 = gmx_invsqrt_pr(rsq_SSE0);
    rinv_SSE2 = gmx_invsqrt_pr(rsq_SSE2);
#ifdef CALC_COULOMB
    /* Load parameters for j atom */
    jq_SSE  = gmx_loaddh_pr(q+aj);
    qq_SSE0 = gmx_mul_pr(iq_SSE0, jq_SSE);
    qq_SSE2 = gmx_mul_pr(iq_SSE2, jq_SSE);
#endif
#ifdef CALC_LJ
#if !defined LJ_COMB_GEOM && !defined LJ_COMB_LB && !defined FIX_LJ_C
    load_lj_pair_params2(nbfp0, nbfp1, type, aj, c6_SSE0, c12_SSE0);
#ifndef HALF_LJ
    load_lj_pair_params2(nbfp2, nbfp3, type, aj, c6_SSE2, c12_SSE2);
#endif
#endif /* not defined any LJ rule */
#ifdef LJ_COMB_GEOM
    c6s_j_SSE  = gmx_loaddh_pr(ljc+aj2+0);
    c12s_j_SSE = gmx_loaddh_pr(ljc+aj2+STRIDE);
    c6_SSE0    = gmx_mul_pr(c6s_SSE0, c6s_j_SSE);
#ifndef HALF_LJ
    c6_SSE2    = gmx_mul_pr(c6s_SSE2, c6s_j_SSE);
#endif
    c12_SSE0   = gmx_mul_pr(c12s_SSE0, c12s_j_SSE);
#ifndef HALF_LJ
    c12_SSE2   = gmx_mul_pr(c12s_SSE2, c12s_j_SSE);
#endif
#endif /* LJ_COMB_GEOM */
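    /* The geometric combination rule used above, in scalar form
     * (illustration only): C6_ij = sqrt(C6_i*C6_j). The per-atom
     * factors c6s = sqrt(C6) and c12s = sqrt(C12) are precomputed,
     * so the rule reduces to a single multiply per pair here.
     */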
#ifdef LJ_COMB_LB
    hsig_j_SSE = gmx_loaddh_pr(ljc+aj2+0);
    seps_j_SSE = gmx_loaddh_pr(ljc+aj2+STRIDE);

    sig_SSE0   = gmx_add_pr(hsig_i_SSE0, hsig_j_SSE);
    eps_SSE0   = gmx_mul_pr(seps_i_SSE0, seps_j_SSE);
#ifndef HALF_LJ
    sig_SSE2   = gmx_add_pr(hsig_i_SSE2, hsig_j_SSE);
    eps_SSE2   = gmx_mul_pr(seps_i_SSE2, seps_j_SSE);
#endif
#endif /* LJ_COMB_LB */
#endif /* CALC_LJ */
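    /* The Lorentz-Berthelot combination rules used above, in scalar form:
     * sigma_ij = (sigma_i + sigma_j)/2, computed from the precomputed
     * half-sigmas hsig = sigma/2, and epsilon_ij = sqrt(eps_i*eps_j),
     * computed from the precomputed roots seps = sqrt(eps).
     */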
#ifndef CUTOFF_BLENDV
    rinv_SSE0 = gmx_and_pr(rinv_SSE0, wco_SSE0);
    rinv_SSE2 = gmx_and_pr(rinv_SSE2, wco_SSE2);
#else
    /* We only need to mask for the cut-off: blendv is faster */
    rinv_SSE0 = gmx_blendv_pr(rinv_SSE0, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE0));
    rinv_SSE2 = gmx_blendv_pr(rinv_SSE2, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE2));
#endif

    rinvsq_SSE0 = gmx_mul_pr(rinv_SSE0, rinv_SSE0);
    rinvsq_SSE2 = gmx_mul_pr(rinv_SSE2, rinv_SSE2);
#ifdef CALC_COULOMB
    /* Note that here we calculate force*r, not the usual force/r.
     * This allows avoiding masking the reaction-field contribution,
     * as frcoul is later multiplied by rinvsq, which has been
     * masked with the cut-off check.
     */
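    /* In scalar form (illustration only): with F(r) the force magnitude,
     * the Cartesian components are (F(r)/r)*(dx,dy,dz). Storing
     * frcoul = F(r)*r and multiplying by the masked rinvsq = 1/r^2 gives
     * the same F(r)/r while zeroing all pairs beyond the cut-off in one go:
     *
     *   fscal = frcoul*rinvsq;  fx = fscal*dx;  fy = fscal*dy;  fz = fscal*dz;
     */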
#ifdef EXCL_FORCES
    /* Only add 1/r for non-excluded atom pairs */
    rinv_ex_SSE0 = gmx_and_pr(rinv_SSE0, int_SSE0);
    rinv_ex_SSE2 = gmx_and_pr(rinv_SSE2, int_SSE2);
#else
    /* No exclusion forces, we always need 1/r */
#define rinv_ex_SSE0 rinv_SSE0
#define rinv_ex_SSE2 rinv_SSE2
#endif
#ifdef CALC_COUL_RF
    /* Electrostatic interactions */
    frcoul_SSE0 = gmx_mul_pr(qq_SSE0, gmx_add_pr(rinv_ex_SSE0, gmx_mul_pr(rsq_SSE0, mrc_3_SSE)));
    frcoul_SSE2 = gmx_mul_pr(qq_SSE2, gmx_add_pr(rinv_ex_SSE2, gmx_mul_pr(rsq_SSE2, mrc_3_SSE)));

#ifdef CALC_ENERGIES
    vcoul_SSE0 = gmx_mul_pr(qq_SSE0, gmx_add_pr(rinv_ex_SSE0, gmx_add_pr(gmx_mul_pr(rsq_SSE0, hrc_3_SSE), moh_rc_SSE)));
    vcoul_SSE2 = gmx_mul_pr(qq_SSE2, gmx_add_pr(rinv_ex_SSE2, gmx_add_pr(gmx_mul_pr(rsq_SSE2, hrc_3_SSE), moh_rc_SSE)));
#endif
#endif /* CALC_COUL_RF */
#ifdef CALC_COUL_EWALD
    /* We need to mask (or limit) rsq for the cut-off,
     * as large distances can cause an overflow in gmx_pmecorrF/V.
     */
#ifndef CUTOFF_BLENDV
    brsq_SSE0 = gmx_mul_pr(beta2_SSE, gmx_and_pr(rsq_SSE0, wco_SSE0));
    brsq_SSE2 = gmx_mul_pr(beta2_SSE, gmx_and_pr(rsq_SSE2, wco_SSE2));
#else
    /* Strangely, putting mul on a separate line is slower (icc 13) */
    brsq_SSE0 = gmx_mul_pr(beta2_SSE, gmx_blendv_pr(rsq_SSE0, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE0)));
    brsq_SSE2 = gmx_mul_pr(beta2_SSE, gmx_blendv_pr(rsq_SSE2, zero_SSE, gmx_sub_pr(rc2_SSE, rsq_SSE2)));
#endif
    ewcorr_SSE0 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE0), beta_SSE);
    ewcorr_SSE2 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE2), beta_SSE);
    frcoul_SSE0 = gmx_mul_pr(qq_SSE0, gmx_add_pr(rinv_ex_SSE0, gmx_mul_pr(ewcorr_SSE0, brsq_SSE0)));
    frcoul_SSE2 = gmx_mul_pr(qq_SSE2, gmx_add_pr(rinv_ex_SSE2, gmx_mul_pr(ewcorr_SSE2, brsq_SSE2)));

#ifdef CALC_ENERGIES
    vc_sub_SSE0 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE0), beta_SSE);
    vc_sub_SSE2 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE2), beta_SSE);
#endif
#endif /* CALC_COUL_EWALD */
#ifdef CALC_COUL_TAB
    /* Electrostatic interactions */
    r_SSE0 = gmx_mul_pr(rsq_SSE0, rinv_SSE0);
    r_SSE2 = gmx_mul_pr(rsq_SSE2, rinv_SSE2);
    /* Convert r to scaled table units */
    rs_SSE0 = gmx_mul_pr(r_SSE0, invtsp_SSE);
    rs_SSE2 = gmx_mul_pr(r_SSE2, invtsp_SSE);
    /* Truncate scaled r to an int */
    ti_SSE0 = gmx_cvttpr_epi32(rs_SSE0);
    ti_SSE2 = gmx_cvttpr_epi32(rs_SSE2);
#ifdef GMX_X86_SSE4_1
    /* SSE4.1 floor is faster than gmx_cvtepi32_ps int->float cast */
    rf_SSE0 = gmx_floor_pr(rs_SSE0);
    rf_SSE2 = gmx_floor_pr(rs_SSE2);
#else
    rf_SSE0 = gmx_cvtepi32_pr(ti_SSE0);
    rf_SSE2 = gmx_cvtepi32_pr(ti_SSE2);
#endif
    frac_SSE0 = gmx_sub_pr(rs_SSE0, rf_SSE0);
    frac_SSE2 = gmx_sub_pr(rs_SSE2, rf_SSE2);
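    /* Worked example (illustration only, with an assumed table spacing
     * sp = 0.002, i.e. invtsp = 500): for r = 0.7308 we get rs = 365.4,
     * so the truncated index is ti = 365 and frac = 0.4, meaning the
     * pair distance lies 40% of the way between table points 365 and 366.
     */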
    /* Load and interpolate table forces and possibly energies.
     * Force and energy can be combined in one table, stride 4: FDV0
     * or in two separate tables with stride 1: F and V.
     * Currently single precision uses FDV0, double F and V.
     */
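    /* Illustration of the two layouts (not compiled). With FDV0, entry i
     * is one stride-4 record, so force and energy come from a single
     * aligned load; with F and V, two stride-1 tables are read instead:
     *
     *   FDV0:  tab[4*i] = { F[i], F[i+1]-F[i], V[i], 0 }
     *   F, V:  tab_coul_F[i] = F[i];  tab_coul_V[i] = V[i];
     *
     * The precomputed difference entry saves loading two force points
     * for the linear interpolation below.
     */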
#ifndef CALC_ENERGIES
    load_table_f(tab_coul_F, ti_SSE0, ti0, ctab0_SSE0, ctab1_SSE0);
    load_table_f(tab_coul_F, ti_SSE2, ti2, ctab0_SSE2, ctab1_SSE2);
#else
#ifdef TAB_FDV0
    load_table_f_v(tab_coul_F, ti_SSE0, ti0, ctab0_SSE0, ctab1_SSE0, ctabv_SSE0);
    load_table_f_v(tab_coul_F, ti_SSE2, ti2, ctab0_SSE2, ctab1_SSE2, ctabv_SSE2);
#else
    load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE0, ti0, ctab0_SSE0, ctab1_SSE0, ctabv_SSE0);
    load_table_f_v(tab_coul_F, tab_coul_V, ti_SSE2, ti2, ctab0_SSE2, ctab1_SSE2, ctabv_SSE2);
#endif
#endif
    fsub_SSE0   = gmx_add_pr(ctab0_SSE0, gmx_mul_pr(frac_SSE0, ctab1_SSE0));
    fsub_SSE2   = gmx_add_pr(ctab0_SSE2, gmx_mul_pr(frac_SSE2, ctab1_SSE2));
    frcoul_SSE0 = gmx_mul_pr(qq_SSE0, gmx_sub_pr(rinv_ex_SSE0, gmx_mul_pr(fsub_SSE0, r_SSE0)));
    frcoul_SSE2 = gmx_mul_pr(qq_SSE2, gmx_sub_pr(rinv_ex_SSE2, gmx_mul_pr(fsub_SSE2, r_SSE2)));
#ifdef CALC_ENERGIES
    vc_sub_SSE0 = gmx_add_pr(ctabv_SSE0, gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE, frac_SSE0), gmx_add_pr(ctab0_SSE0, fsub_SSE0)));
    vc_sub_SSE2 = gmx_add_pr(ctabv_SSE2, gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE, frac_SSE2), gmx_add_pr(ctab0_SSE2, fsub_SSE2)));
#endif
#endif /* CALC_COUL_TAB */
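    /* The energy interpolation above is the trapezoid rule applied to
     * the linearly interpolated force (illustration only): with
     * F0 = ctab0 the force at the table point, F = fsub the interpolated
     * force a fraction frac past it, and mhalfsp = -sp/2, we get
     *
     *   vc_sub = V0 - (sp*frac/2)*(F0 + F)
     *
     * i.e. V0 minus the integral of F over the sub-interval, which is
     * quadratic in frac and consistent with the force table.
     */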
#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
#ifndef NO_SHIFT_EWALD
    /* Add Ewald potential shift to vc_sub for convenience */
#ifdef CHECK_EXCLS
    vc_sub_SSE0 = gmx_add_pr(vc_sub_SSE0, gmx_and_pr(sh_ewald_SSE, int_SSE0));
    vc_sub_SSE2 = gmx_add_pr(vc_sub_SSE2, gmx_and_pr(sh_ewald_SSE, int_SSE2));
#else
    vc_sub_SSE0 = gmx_add_pr(vc_sub_SSE0, sh_ewald_SSE);
    vc_sub_SSE2 = gmx_add_pr(vc_sub_SSE2, sh_ewald_SSE);
#endif
#endif

    vcoul_SSE0 = gmx_mul_pr(qq_SSE0, gmx_sub_pr(rinv_ex_SSE0, vc_sub_SSE0));
    vcoul_SSE2 = gmx_mul_pr(qq_SSE2, gmx_sub_pr(rinv_ex_SSE2, vc_sub_SSE2));
#endif
#ifdef CALC_ENERGIES
    /* Mask energy for cut-off and diagonal */
    vcoul_SSE0 = gmx_and_pr(vcoul_SSE0, wco_SSE0);
    vcoul_SSE2 = gmx_and_pr(vcoul_SSE2, wco_SSE2);
#endif

#endif /* CALC_COULOMB */
#ifdef CALC_LJ
    /* Lennard-Jones interaction */

#ifdef VDW_CUTOFF_CHECK
    wco_vdw_SSE0 = gmx_cmplt_pr(rsq_SSE0, rcvdw2_SSE);
#ifndef HALF_LJ
    wco_vdw_SSE2 = gmx_cmplt_pr(rsq_SSE2, rcvdw2_SSE);
#endif
#else
    /* Same cut-off for Coulomb and VdW, reuse the registers */
#define wco_vdw_SSE0 wco_SSE0
#define wco_vdw_SSE2 wco_SSE2
#endif
#ifndef LJ_COMB_LB
    rinvsix_SSE0 = gmx_mul_pr(rinvsq_SSE0, gmx_mul_pr(rinvsq_SSE0, rinvsq_SSE0));
#ifdef EXCL_FORCES
    rinvsix_SSE0 = gmx_and_pr(rinvsix_SSE0, int_SSE0);
#endif
#ifndef HALF_LJ
    rinvsix_SSE2 = gmx_mul_pr(rinvsq_SSE2, gmx_mul_pr(rinvsq_SSE2, rinvsq_SSE2));
#ifdef EXCL_FORCES
    rinvsix_SSE2 = gmx_and_pr(rinvsix_SSE2, int_SSE2);
#endif
#endif
#ifdef VDW_CUTOFF_CHECK
    rinvsix_SSE0 = gmx_and_pr(rinvsix_SSE0, wco_vdw_SSE0);
#ifndef HALF_LJ
    rinvsix_SSE2 = gmx_and_pr(rinvsix_SSE2, wco_vdw_SSE2);
#endif
#endif
    FrLJ6_SSE0  = gmx_mul_pr(c6_SSE0, rinvsix_SSE0);
#ifndef HALF_LJ
    FrLJ6_SSE2  = gmx_mul_pr(c6_SSE2, rinvsix_SSE2);
#endif
    FrLJ12_SSE0 = gmx_mul_pr(c12_SSE0, gmx_mul_pr(rinvsix_SSE0, rinvsix_SSE0));
#ifndef HALF_LJ
    FrLJ12_SSE2 = gmx_mul_pr(c12_SSE2, gmx_mul_pr(rinvsix_SSE2, rinvsix_SSE2));
#endif
#endif /* not LJ_COMB_LB */
#ifdef LJ_COMB_LB
    sir_SSE0  = gmx_mul_pr(sig_SSE0, rinv_SSE0);
#ifndef HALF_LJ
    sir_SSE2  = gmx_mul_pr(sig_SSE2, rinv_SSE2);
#endif
    sir2_SSE0 = gmx_mul_pr(sir_SSE0, sir_SSE0);
#ifndef HALF_LJ
    sir2_SSE2 = gmx_mul_pr(sir_SSE2, sir_SSE2);
#endif
    sir6_SSE0 = gmx_mul_pr(sir2_SSE0, gmx_mul_pr(sir2_SSE0, sir2_SSE0));
#ifdef EXCL_FORCES
    sir6_SSE0 = gmx_and_pr(sir6_SSE0, int_SSE0);
#endif
#ifndef HALF_LJ
    sir6_SSE2 = gmx_mul_pr(sir2_SSE2, gmx_mul_pr(sir2_SSE2, sir2_SSE2));
#ifdef EXCL_FORCES
    sir6_SSE2 = gmx_and_pr(sir6_SSE2, int_SSE2);
#endif
#endif
#ifdef VDW_CUTOFF_CHECK
    sir6_SSE0 = gmx_and_pr(sir6_SSE0, wco_vdw_SSE0);
#ifndef HALF_LJ
    sir6_SSE2 = gmx_and_pr(sir6_SSE2, wco_vdw_SSE2);
#endif
#endif
    FrLJ6_SSE0  = gmx_mul_pr(eps_SSE0, sir6_SSE0);
#ifndef HALF_LJ
    FrLJ6_SSE2  = gmx_mul_pr(eps_SSE2, sir6_SSE2);
#endif
    FrLJ12_SSE0 = gmx_mul_pr(FrLJ6_SSE0, sir6_SSE0);
#ifndef HALF_LJ
    FrLJ12_SSE2 = gmx_mul_pr(FrLJ6_SSE2, sir6_SSE2);
#endif
#if defined CALC_ENERGIES
    /* We need C6 and C12 to calculate the LJ potential shift */
    sig2_SSE0 = gmx_mul_pr(sig_SSE0, sig_SSE0);
#ifndef HALF_LJ
    sig2_SSE2 = gmx_mul_pr(sig_SSE2, sig_SSE2);
#endif
    sig6_SSE0 = gmx_mul_pr(sig2_SSE0, gmx_mul_pr(sig2_SSE0, sig2_SSE0));
#ifndef HALF_LJ
    sig6_SSE2 = gmx_mul_pr(sig2_SSE2, gmx_mul_pr(sig2_SSE2, sig2_SSE2));
#endif
    c6_SSE0   = gmx_mul_pr(eps_SSE0, sig6_SSE0);
#ifndef HALF_LJ
    c6_SSE2   = gmx_mul_pr(eps_SSE2, sig6_SSE2);
#endif
    c12_SSE0  = gmx_mul_pr(c6_SSE0, sig6_SSE0);
#ifndef HALF_LJ
    c12_SSE2  = gmx_mul_pr(c6_SSE2, sig6_SSE2);
#endif
#endif /* CALC_ENERGIES */
#endif /* LJ_COMB_LB */
#endif /* CALC_LJ */
#ifdef CALC_ENERGIES
#ifdef ENERGY_GROUPS
    /* Extract the group pair index per j pair.
     * Energy groups are stored per i-cluster, so things get
     * complicated when the i- and j-cluster size don't match.
     */
    {
        int egps_j;
#if UNROLLJ == 2
        egps_j    = nbat->energrp[cj>>1];
        egp_jj[0] = ((egps_j >> ((cj & 1)*egps_jshift)) & egps_jmask)*egps_jstride;
#else
        /* We assume UNROLLI <= UNROLLJ */
        int jdi;

        for (jdi = 0; jdi < UNROLLJ/UNROLLI; jdi++)
        {
            int jj;

            egps_j = nbat->energrp[cj*(UNROLLJ/UNROLLI)+jdi];
            for (jj = 0; jj < (UNROLLI/2); jj++)
            {
                egp_jj[jdi*(UNROLLI/2)+jj] = ((egps_j >> (jj*egps_jshift)) & egps_jmask)*egps_jstride;
            }
        }
#endif
    }
#endif
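    /* Worked example (illustration only, with assumed values): with
     * 8-bit fields, i.e. egps_jshift = 8 and egps_jmask = 255, and
     * egps_j = 0x00000201, field 0 of the packed int is energy group 1
     * and field 1 is group 2. Multiplying by egps_jstride turns each
     * group id into an offset into the per-group energy buffers
     * vctp/vvdwtp used below.
     */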
#ifdef CALC_COULOMB
#ifndef ENERGY_GROUPS
    vctotSSE = gmx_add_pr(vctotSSE, gmx_add_pr(vcoul_SSE0, vcoul_SSE2));
#else
    add_ener_grp_halves(vcoul_SSE0, vctp[0], vctp[1], egp_jj);
    add_ener_grp_halves(vcoul_SSE2, vctp[2], vctp[3], egp_jj);
#endif
#endif
#ifdef CALC_LJ
    /* Calculate the LJ energies */
    VLJ6_SSE0  = gmx_mul_pr(sixthSSE, gmx_sub_pr(FrLJ6_SSE0, gmx_mul_pr(c6_SSE0, sh_invrc6_SSE)));
#ifndef HALF_LJ
    VLJ6_SSE2  = gmx_mul_pr(sixthSSE, gmx_sub_pr(FrLJ6_SSE2, gmx_mul_pr(c6_SSE2, sh_invrc6_SSE)));
#endif
    VLJ12_SSE0 = gmx_mul_pr(twelvethSSE, gmx_sub_pr(FrLJ12_SSE0, gmx_mul_pr(c12_SSE0, sh_invrc12_SSE)));
#ifndef HALF_LJ
    VLJ12_SSE2 = gmx_mul_pr(twelvethSSE, gmx_sub_pr(FrLJ12_SSE2, gmx_mul_pr(c12_SSE2, sh_invrc12_SSE)));
#endif

    VLJ_SSE0 = gmx_sub_pr(VLJ12_SSE0, VLJ6_SSE0);
#ifndef HALF_LJ
    VLJ_SSE2 = gmx_sub_pr(VLJ12_SSE2, VLJ6_SSE2);
#endif
    /* The potential shift should be removed for pairs beyond cut-off */
    VLJ_SSE0 = gmx_and_pr(VLJ_SSE0, wco_vdw_SSE0);
#ifndef HALF_LJ
    VLJ_SSE2 = gmx_and_pr(VLJ_SSE2, wco_vdw_SSE2);
#endif
#ifdef CHECK_EXCLS
    /* The potential shift should be removed for excluded pairs */
    VLJ_SSE0 = gmx_and_pr(VLJ_SSE0, int_SSE0);
#ifndef HALF_LJ
    VLJ_SSE2 = gmx_and_pr(VLJ_SSE2, int_SSE2);
#endif
#endif
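    /* Consistency of the 1/6 and 1/12 factors above (illustration only):
     * the kernel accumulates force*r as FrLJ12 - FrLJ6, so with the
     * standard form F(r)*r = 12*A/r^12 - 6*B/r^6 the parameters must be
     * stored as c12 = 12*A and c6 = 6*B. The energies then need the
     * sixth/twelveth prefactors applied above:
     *
     *   VLJ = c12/(12*r^12) - c6/(6*r^6) = A/r^12 - B/r^6
     *
     * with the sh_invrc6/sh_invrc12 terms shifting V to zero at the
     * cut-off.
     */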
#ifndef ENERGY_GROUPS
    VvdwtotSSE = gmx_add_pr(VvdwtotSSE,
#ifndef HALF_LJ
                            gmx_add_pr(VLJ_SSE0, VLJ_SSE2)
#else
                            VLJ_SSE0
#endif
                            );
#else
    add_ener_grp_halves(VLJ_SSE0, vvdwtp[0], vvdwtp[1], egp_jj);
#ifndef HALF_LJ
    add_ener_grp_halves(VLJ_SSE2, vvdwtp[2], vvdwtp[3], egp_jj);
#endif
#endif
#endif /* CALC_LJ */
#endif /* CALC_ENERGIES */
#ifdef CALC_LJ
    fscal_SSE0 = gmx_mul_pr(rinvsq_SSE0,
#ifdef CALC_COULOMB
                            gmx_add_pr(frcoul_SSE0,
#else
                            (
#endif
                                       gmx_sub_pr(FrLJ12_SSE0, FrLJ6_SSE0)));
#else
    fscal_SSE0 = gmx_mul_pr(rinvsq_SSE0, frcoul_SSE0);
#endif /* CALC_LJ */
#if defined CALC_LJ && !defined HALF_LJ
    fscal_SSE2 = gmx_mul_pr(rinvsq_SSE2,
#ifdef CALC_COULOMB
                            gmx_add_pr(frcoul_SSE2,
#else
                            (
#endif
                                       gmx_sub_pr(FrLJ12_SSE2, FrLJ6_SSE2)));
#else
    /* Atom 2 and 3 don't have LJ, so only add Coulomb forces */
    fscal_SSE2 = gmx_mul_pr(rinvsq_SSE2, frcoul_SSE2);
#endif
    /* Calculate temporary vectorial force */
    tx_SSE0 = gmx_mul_pr(fscal_SSE0, dx_SSE0);
    tx_SSE2 = gmx_mul_pr(fscal_SSE2, dx_SSE2);
    ty_SSE0 = gmx_mul_pr(fscal_SSE0, dy_SSE0);
    ty_SSE2 = gmx_mul_pr(fscal_SSE2, dy_SSE2);
    tz_SSE0 = gmx_mul_pr(fscal_SSE0, dz_SSE0);
    tz_SSE2 = gmx_mul_pr(fscal_SSE2, dz_SSE2);
    /* Increment i atom force */
    fix_SSE0 = gmx_add_pr(fix_SSE0, tx_SSE0);
    fix_SSE2 = gmx_add_pr(fix_SSE2, tx_SSE2);
    fiy_SSE0 = gmx_add_pr(fiy_SSE0, ty_SSE0);
    fiy_SSE2 = gmx_add_pr(fiy_SSE2, ty_SSE2);
    fiz_SSE0 = gmx_add_pr(fiz_SSE0, tz_SSE0);
    fiz_SSE2 = gmx_add_pr(fiz_SSE2, tz_SSE2);
    /* Decrement j atom force */
    gmx_store_hpr(f+ajx,
                  gmx_sub_hpr( gmx_load_hpr(f+ajx), gmx_sum4_hpr(tx_SSE0, tx_SSE2) ));
    gmx_store_hpr(f+ajy,
                  gmx_sub_hpr( gmx_load_hpr(f+ajy), gmx_sum4_hpr(ty_SSE0, ty_SSE2) ));
    gmx_store_hpr(f+ajz,
                  gmx_sub_hpr( gmx_load_hpr(f+ajz), gmx_sum4_hpr(tz_SSE0, tz_SSE2) ));
}
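/* Sketch of the j-force reduction above (illustration only): tx_SSE0 and
 * tx_SSE2 each hold the forces from two i-particles on the same UNROLLJ
 * j-particles, one i-particle per register half. gmx_sum4_hpr adds the
 * four half-registers into a single half-width vector, which is then
 * subtracted from the j forces with one half-width load and store per
 * dimension.
 */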