2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2009, The GROMACS Development Team
6 * Copyright (c) 2012, by the GROMACS development team, led by
7 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
8 * others, as listed in the AUTHORS file in the top-level source
9 * directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
38 /* This is the innermost loop contents for the 4 x N atom SIMD kernel.
39 * This flavor of the kernel duplicates the data for N j-particles in
40 * 2xN wide SIMD registers to operate on 2 i-particles at once.
41 * This leads to 4/2=2 sets of most instructions. Therefore we call
42 * this kernel 2x(N+N) = 2xnn
44 * This 2xnn kernel is basically the 4xn equivalent with half the registers
45 * and instructions removed.
47 * An alternative would be to load two different clusters of N j-particles
48 * into SIMD registers, giving a 4x(N+N) kernel. This doubles the amount
49 * of instructions, which could lead to better scheduling. But we actually
50 * observed worse scheduling for the AVX-256 4x8 normal analytical PME
51 * kernel, which has a lower pair throughput than 2x(4+4) with gcc 4.7.
52 * It could be worth trying this option, but it takes some more effort.
53 * This 2xnn kernel is basically the 4xn equivalent with half the
54 * registers and instructions removed.
57 /* When calculating RF or Ewald interactions we calculate the electrostatic
58 * forces on excluded atom pairs here in the non-bonded loops.
59 * But when energies and/or virial is required we calculate them
60 * separately, as then it is easier to separate the energy and virial
/* NOTE(review): every line of this excerpt carries a stray leading integer
 * (apparently the original file's own line numbers, introduced by the
 * extraction), and several #define/#else/#endif lines appear to be missing
 * (the embedded numbering jumps). Code is left byte-identical below --
 * TODO: restore from the pristine source before compiling. */
/* Compile-time flavor selection: the conditions below decide, per kernel
 * flavor and compiler, when plain compare+and cut-off masking is replaced
 * by blendv-based masking (presumably via a CUTOFF_BLENDV define whose
 * #define lines are not visible in this excerpt -- confirm). */
63 #if defined CHECK_EXCLS && defined CALC_COULOMB
67 /* Without exclusions and energies we only need to mask the cut-off,
68 * this can be faster with blendv.
70 #if !(defined CHECK_EXCLS || defined CALC_ENERGIES) && defined GMX_SIMD_HAVE_BLENDV && !defined COUNT_PAIRS
71 /* With RF and tabulated Coulomb we replace cmp+and with sub+blendv.
72 * With gcc this is slower, except for RF on Sandy Bridge.
73 * Tested with gcc 4.6.2, 4.6.3 and 4.7.1.
75 #if (defined CALC_COUL_RF || defined CALC_COUL_TAB) && (!defined __GNUC__ || (defined CALC_COUL_RF && defined GMX_X86_AVX_256))
78 /* With analytical Ewald we replace cmp+and+and with sub+blendv+blendv.
79 * This is only faster with icc on Sandy Bridge (PS kernel slower than gcc 4.7).
82 #if defined CALC_COUL_EWALD && defined __INTEL_COMPILER && defined GMX_X86_AVX_256
/* Scalar loop locals: j-cluster index (cj), first-atom index (aj) and
 * per-dimension coordinate offsets (ajx/ajy/ajz). */
88 int cj, aj, ajx, ajy, ajz;
91 /* Energy group indices for two atoms packed into one int */
92 int egp_jj[UNROLLJ/2];
/* SIMD working registers. Suffixes S0/S2 name the two register rows of the
 * 2x(N+N) layout: each full-width register holds data for 2 i-particles
 * duplicated across the two halves (see the file header comment). */
96 /* Interaction (non-exclusion) mask of all 1's or 0's */
97 gmx_mm_pb interact_S0;
98 gmx_mm_pb interact_S2;
101 gmx_mm_pr jx_S, jy_S, jz_S;
102 gmx_mm_pr dx_S0, dy_S0, dz_S0;
103 gmx_mm_pr dx_S2, dy_S2, dz_S2;
104 gmx_mm_pr tx_S0, ty_S0, tz_S0;
105 gmx_mm_pr tx_S2, ty_S2, tz_S2;
106 gmx_mm_pr rsq_S0, rinv_S0, rinvsq_S0;
107 gmx_mm_pr rsq_S2, rinv_S2, rinvsq_S2;
108 #ifndef CUTOFF_BLENDV
109 /* wco: within cut-off, mask of all 1's or 0's */
113 #ifdef VDW_CUTOFF_CHECK
114 gmx_mm_pb wco_vdw_S0;
116 gmx_mm_pb wco_vdw_S2;
121 /* 1/r masked with the interaction mask */
122 gmx_mm_pr rinv_ex_S0;
123 gmx_mm_pr rinv_ex_S2;
129 /* The force (PME mesh force) we need to subtract from 1/r^2 */
133 #ifdef CALC_COUL_EWALD
134 gmx_mm_pr brsq_S0, brsq_S2;
135 gmx_mm_pr ewcorr_S0, ewcorr_S2;
138 /* frcoul = (1/r - fsub)*r */
142 /* For tables: r, rs=r/sp, rf=floor(rs), frac=rs-rf */
143 gmx_mm_pr r_S0, rs_S0, rf_S0, frac_S0;
144 gmx_mm_pr r_S2, rs_S2, rf_S2, frac_S2;
145 /* Table index: rs truncated to an int */
146 gmx_epi32 ti_S0, ti_S2;
147 /* Linear force table values */
148 gmx_mm_pr ctab0_S0, ctab1_S0;
149 gmx_mm_pr ctab0_S2, ctab1_S2;
151 /* Quadratic energy table value */
156 #if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
157 /* The potential (PME mesh) we need to subtract from 1/r */
162 /* Electrostatic potential */
167 /* The force times 1/r */
173 /* LJ sigma_j/2 and sqrt(epsilon_j) */
174 gmx_mm_pr hsig_j_S, seps_j_S;
175 /* LJ sigma_ij and epsilon_ij */
176 gmx_mm_pr sig_S0, eps_S0;
178 gmx_mm_pr sig_S2, eps_S2;
181 gmx_mm_pr sig2_S0, sig6_S0;
183 gmx_mm_pr sig2_S2, sig6_S2;
185 #endif /* LJ_COMB_LB */
189 gmx_mm_pr c6s_j_S, c12s_j_S;
192 #if defined LJ_COMB_GEOM || defined LJ_COMB_LB
193 /* Index for loading LJ parameters, complicated when interleaving */
198 /* LJ C6 and C12 parameters, used with geometric comb. rule */
199 gmx_mm_pr c6_S0, c12_S0;
201 gmx_mm_pr c6_S2, c12_S2;
205 /* Intermediate variables for LJ calculation */
207 gmx_mm_pr rinvsix_S0;
209 gmx_mm_pr rinvsix_S2;
213 gmx_mm_pr sir_S0, sir2_S0, sir6_S0;
215 gmx_mm_pr sir_S2, sir2_S2, sir6_S2;
219 gmx_mm_pr FrLJ6_S0, FrLJ12_S0;
221 gmx_mm_pr FrLJ6_S2, FrLJ12_S2;
224 gmx_mm_pr VLJ6_S0, VLJ12_S0, VLJ_S0;
226 gmx_mm_pr VLJ6_S2, VLJ12_S2, VLJ_S2;
/* Half-width accumulators for the j-atom force update (j data is only
 * UNROLLJ wide, i.e. half of a full register in this layout). */
231 gmx_mm_hpr fjx_S, fjy_S, fjz_S;
233 /* j-cluster index */
236 /* Atom indices (of the first atom in the cluster) */
/* Convert the j-cluster index cj into flat array offsets; the (cj>>1)/
 * (cj & 1) arithmetic handles the interleaved storage when the j-cluster
 * size differs from the memory stride (see comment at line 193 above). */
238 #if defined CALC_LJ && (defined LJ_COMB_GEOM || defined LJ_COMB_LB)
239 #if UNROLLJ == STRIDE
242 aj2 = (cj>>1)*2*STRIDE + (cj & 1)*UNROLLJ;
245 #if UNROLLJ == STRIDE
248 ajx = (cj>>1)*DIM*STRIDE + (cj & 1)*UNROLLJ;
/* Expand the per-j-cluster topology exclusion bitfield into full SIMD
 * boolean masks, one bit per i/j atom pair, using whichever bitmask
 * primitive the SIMD layer provides (integer or real-cast variant). */
254 #ifdef GMX_SIMD_HAVE_CHECKBITMASK_EPI32
256 /* Load integer topology exclusion interaction mask */
257 gmx_epi32 mask_pr_S = gmx_set1_epi32(l_cj[cjind].excl);
259 interact_S0 = gmx_checkbitmask_epi32(mask_pr_S, filter_S0);
260 interact_S2 = gmx_checkbitmask_epi32(mask_pr_S, filter_S2);
263 #ifdef GMX_SIMD_HAVE_CHECKBITMASK_PR
265 /* Integer mask set, cast to real and real mask operations */
266 gmx_mm_pr mask_pr_S = gmx_castsi_pr(gmx_set1_epi32(l_cj[cjind].excl));
268 interact_S0 = gmx_checkbitmask_pr(mask_pr_S, filter_S0);
269 interact_S2 = gmx_checkbitmask_pr(mask_pr_S, filter_S2);
272 #error "No SIMD bitmask operation available"
275 #endif /* CHECK_EXCLS */
277 /* load j atom coordinates */
/* gmx_loaddh_pr loads UNROLLJ j-values and duplicates them into both
 * register halves, matching the duplicated i-particle layout. */
278 gmx_loaddh_pr(&jx_S, x+ajx);
279 gmx_loaddh_pr(&jy_S, x+ajy);
280 gmx_loaddh_pr(&jz_S, x+ajz);
282 /* Calculate distance */
283 dx_S0 = gmx_sub_pr(ix_S0, jx_S);
284 dy_S0 = gmx_sub_pr(iy_S0, jy_S);
285 dz_S0 = gmx_sub_pr(iz_S0, jz_S);
286 dx_S2 = gmx_sub_pr(ix_S2, jx_S);
287 dy_S2 = gmx_sub_pr(iy_S2, jy_S);
288 dz_S2 = gmx_sub_pr(iz_S2, jz_S);
290 /* rsq = dx*dx+dy*dy+dz*dz */
291 rsq_S0 = gmx_calc_rsq_pr(dx_S0, dy_S0, dz_S0);
292 rsq_S2 = gmx_calc_rsq_pr(dx_S2, dy_S2, dz_S2);
/* Cut-off check: wco = (r^2 < rc^2); skipped when blendv masking is used */
294 #ifndef CUTOFF_BLENDV
295 wco_S0 = gmx_cmplt_pr(rsq_S0, rc2_S);
296 wco_S2 = gmx_cmplt_pr(rsq_S2, rc2_S);
/* Self-interacting cluster pair (cj == ci): mask out the (sub-)diagonal
 * pairs so each pair is counted once. With UNROLLJ == 2*UNROLLI one
 * j-cluster covers two i-clusters, hence the two half-diagonal masks.
 * NOTE(review): the enclosing if-statements/braces are not visible in this
 * excerpt (lines missing) -- confirm against the pristine source. */
301 /* Only remove the (sub-)diagonal to avoid double counting */
302 #if UNROLLJ == UNROLLI
305 wco_S0 = gmx_and_pb(wco_S0, diagonal_mask_S0);
306 wco_S2 = gmx_and_pb(wco_S2, diagonal_mask_S2);
309 #if UNROLLJ == 2*UNROLLI
312 wco_S0 = gmx_and_pb(wco_S0, diagonal_mask0_S0);
313 wco_S2 = gmx_and_pb(wco_S2, diagonal_mask0_S2);
315 else if (cj*2 + 1 == ci_sh)
317 wco_S0 = gmx_and_pb(wco_S0, diagonal_mask1_S0);
318 wco_S2 = gmx_and_pb(wco_S2, diagonal_mask1_S2);
321 #error "only UNROLLJ == UNROLLI*(1 or 2) currently supported in 2xnn kernels"
324 #else /* EXCL_FORCES */
325 /* No exclusion forces: remove all excluded atom pairs from the list */
326 wco_S0 = gmx_and_pb(wco_S0, interact_S0);
327 wco_S2 = gmx_and_pb(wco_S2, interact_S2);
/* Debug/statistics path: store the cut-off masks to an aligned scalar
 * buffer and count set lanes -- presumably guarded by COUNT_PAIRS; the
 * #ifdef and the counting statements are not visible here (confirm). */
334 real tmpa[2*GMX_SIMD_WIDTH_HERE], *tmp;
335 tmp = gmx_simd_align_real(tmpa);
336 for (i = 0; i < UNROLLI; i+=2)
338 gmx_store_pr(tmp, i == 0 ? wco_S0 : wco_S2);
339 for (j = 0; j < 2*UNROLLJ; j++)
351 /* For excluded pairs add a small number to avoid r^-6 = NaN */
352 rsq_S0 = gmx_masknot_add_pr(interact_S0, rsq_S0, avoid_sing_S);
353 rsq_S2 = gmx_masknot_add_pr(interact_S2, rsq_S2, avoid_sing_S);
/* 1/r via SIMD inverse square root (r^2 made non-zero above) */
357 rinv_S0 = gmx_invsqrt_pr(rsq_S0);
358 rinv_S2 = gmx_invsqrt_pr(rsq_S2);
361 /* Load parameters for j atom */
/* Charges: qq = qi*qj per register row */
362 gmx_loaddh_pr(&jq_S, q+aj);
363 qq_S0 = gmx_mul_pr(iq_S0, jq_S);
364 qq_S2 = gmx_mul_pr(iq_S2, jq_S);
/* LJ parameters, three mutually exclusive paths:
 * (1) full per-pair C6/C12 lookup from the nbfp tables (no comb. rule), */
369 #if !defined LJ_COMB_GEOM && !defined LJ_COMB_LB && !defined FIX_LJ_C
370 load_lj_pair_params2(nbfp0, nbfp1, type, aj, &c6_S0, &c12_S0);
372 load_lj_pair_params2(nbfp2, nbfp3, type, aj, &c6_S2, &c12_S2);
374 #endif /* not defined any LJ rule */
/* (2) geometric combination rule: C6ij = c6s_i*c6s_j, same for C12, */
377 gmx_loaddh_pr(&c6s_j_S, ljc+aj2+0);
378 gmx_loaddh_pr(&c12s_j_S, ljc+aj2+STRIDE);
379 c6_S0 = gmx_mul_pr(c6s_S0, c6s_j_S );
381 c6_S2 = gmx_mul_pr(c6s_S2, c6s_j_S );
383 c12_S0 = gmx_mul_pr(c12s_S0, c12s_j_S);
385 c12_S2 = gmx_mul_pr(c12s_S2, c12s_j_S);
387 #endif /* LJ_COMB_GEOM */
/* (3) Lorentz-Berthelot: sigma_ij = sigma_i/2 + sigma_j/2,
 * eps_ij = sqrt(eps_i)*sqrt(eps_j). */
390 gmx_loaddh_pr(&hsig_j_S, ljc+aj2+0);
391 gmx_loaddh_pr(&seps_j_S, ljc+aj2+STRIDE);
393 sig_S0 = gmx_add_pr(hsig_i_S0, hsig_j_S);
394 eps_S0 = gmx_mul_pr(seps_i_S0, seps_j_S);
396 sig_S2 = gmx_add_pr(hsig_i_S2, hsig_j_S);
397 eps_S2 = gmx_mul_pr(seps_i_S2, seps_j_S);
399 #endif /* LJ_COMB_LB */
/* Zero 1/r for pairs beyond the cut-off, either with the boolean mask
 * (blendzero) or with blendv on the sign of rc^2 - r^2. */
403 #ifndef CUTOFF_BLENDV
404 rinv_S0 = gmx_blendzero_pr(rinv_S0, wco_S0);
405 rinv_S2 = gmx_blendzero_pr(rinv_S2, wco_S2);
407 /* We only need to mask for the cut-off: blendv is faster */
408 rinv_S0 = gmx_blendv_pr(rinv_S0, zero_S, gmx_sub_pr(rc2_S, rsq_S0));
409 rinv_S2 = gmx_blendv_pr(rinv_S2, zero_S, gmx_sub_pr(rc2_S, rsq_S2));
412 rinvsq_S0 = gmx_mul_pr(rinv_S0, rinv_S0);
413 rinvsq_S2 = gmx_mul_pr(rinv_S2, rinv_S2);
/* rinvsq is now zero beyond the cut-off; later force expressions rely
 * on this masking (see comment at line 416 below). */
416 /* Note that here we calculate force*r, not the usual force/r.
417 * This allows avoiding masking the reaction-field contribution,
418 * as frcoul is later multiplied by rinvsq which has been
419 * masked with the cut-off check.
423 /* Only add 1/r for non-excluded atom pairs */
424 rinv_ex_S0 = gmx_blendzero_pr(rinv_S0, interact_S0);
425 rinv_ex_S2 = gmx_blendzero_pr(rinv_S2, interact_S2);
427 /* No exclusion forces, we always need 1/r */
428 #define rinv_ex_S0 rinv_S0
429 #define rinv_ex_S2 rinv_S2
/* --- Coulomb, reaction-field flavor: frcoul = qq*(1/r - rc3*r^2) (times r,
 * see the force*r convention noted above); vcoul adds the RF potential. */
433 /* Electrostatic interactions */
434 frcoul_S0 = gmx_mul_pr(qq_S0, gmx_madd_pr(rsq_S0, mrc_3_S, rinv_ex_S0));
435 frcoul_S2 = gmx_mul_pr(qq_S2, gmx_madd_pr(rsq_S2, mrc_3_S, rinv_ex_S2));
438 vcoul_S0 = gmx_mul_pr(qq_S0, gmx_add_pr(rinv_ex_S0, gmx_add_pr(gmx_mul_pr(rsq_S0, hrc_3_S), moh_rc_S)));
439 vcoul_S2 = gmx_mul_pr(qq_S2, gmx_add_pr(rinv_ex_S2, gmx_add_pr(gmx_mul_pr(rsq_S2, hrc_3_S), moh_rc_S)));
/* --- Coulomb, analytical Ewald flavor: evaluate the PME correction
 * polynomial on beta^2*r^2 and fold it into force and potential. */
443 #ifdef CALC_COUL_EWALD
444 /* We need to mask (or limit) rsq for the cut-off,
445 * as large distances can cause an overflow in gmx_pmecorrF/V.
447 #ifndef CUTOFF_BLENDV
448 brsq_S0 = gmx_mul_pr(beta2_S, gmx_blendzero_pr(rsq_S0, wco_S0));
449 brsq_S2 = gmx_mul_pr(beta2_S, gmx_blendzero_pr(rsq_S2, wco_S2));
451 /* Strangely, putting mul on a separate line is slower (icc 13) */
452 brsq_S0 = gmx_mul_pr(beta2_S, gmx_blendv_pr(rsq_S0, zero_S, gmx_sub_pr(rc2_S, rsq_S0)));
453 brsq_S2 = gmx_mul_pr(beta2_S, gmx_blendv_pr(rsq_S2, zero_S, gmx_sub_pr(rc2_S, rsq_S2)));
455 ewcorr_S0 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_S0), beta_S);
456 ewcorr_S2 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_S2), beta_S);
457 frcoul_S0 = gmx_mul_pr(qq_S0, gmx_madd_pr(ewcorr_S0, brsq_S0, rinv_ex_S0));
458 frcoul_S2 = gmx_mul_pr(qq_S2, gmx_madd_pr(ewcorr_S2, brsq_S2, rinv_ex_S2));
/* vc_sub: the mesh part of the potential, subtracted from 1/r below */
461 vc_sub_S0 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_S0), beta_S);
462 vc_sub_S2 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_S2), beta_S);
465 #endif /* CALC_COUL_EWALD */
/* --- Coulomb, tabulated flavor: linear interpolation of a force table
 * indexed by truncated r/spacing; energy from quadratic interpolation. */
468 /* Electrostatic interactions */
469 r_S0 = gmx_mul_pr(rsq_S0, rinv_S0);
470 r_S2 = gmx_mul_pr(rsq_S2, rinv_S2);
471 /* Convert r to scaled table units */
472 rs_S0 = gmx_mul_pr(r_S0, invtsp_S);
473 rs_S2 = gmx_mul_pr(r_S2, invtsp_S);
474 /* Truncate scaled r to an int */
475 ti_S0 = gmx_cvttpr_epi32(rs_S0);
476 ti_S2 = gmx_cvttpr_epi32(rs_S2);
477 #ifdef GMX_SIMD_HAVE_FLOOR
/* floor is valid here because rs is non-negative (truncation == floor) */
478 rf_S0 = gmx_floor_pr(rs_S0);
479 rf_S2 = gmx_floor_pr(rs_S2);
481 rf_S0 = gmx_cvtepi32_pr(ti_S0);
482 rf_S2 = gmx_cvtepi32_pr(ti_S2);
484 frac_S0 = gmx_sub_pr(rs_S0, rf_S0);
485 frac_S2 = gmx_sub_pr(rs_S2, rf_S2);
487 /* Load and interpolate table forces and possibly energies.
488 * Force and energy can be combined in one table, stride 4: FDV0
489 * or in two separate tables with stride 1: F and V
490 * Currently single precision uses FDV0, double F and V.
492 #ifndef CALC_ENERGIES
493 load_table_f(tab_coul_F, ti_S0, ti0, &ctab0_S0, &ctab1_S0);
494 load_table_f(tab_coul_F, ti_S2, ti2, &ctab0_S2, &ctab1_S2);
497 load_table_f_v(tab_coul_F, ti_S0, ti0, &ctab0_S0, &ctab1_S0, &ctabv_S0);
498 load_table_f_v(tab_coul_F, ti_S2, ti2, &ctab0_S2, &ctab1_S2, &ctabv_S2);
500 load_table_f_v(tab_coul_F, tab_coul_V, ti_S0, ti0, &ctab0_S0, &ctab1_S0, &ctabv_S0);
501 load_table_f_v(tab_coul_F, tab_coul_V, ti_S2, ti2, &ctab0_S2, &ctab1_S2, &ctabv_S2);
/* fsub = F(ti) + frac*dF: the tabulated (mesh) force to subtract */
504 fsub_S0 = gmx_add_pr(ctab0_S0, gmx_mul_pr(frac_S0, ctab1_S0));
505 fsub_S2 = gmx_add_pr(ctab0_S2, gmx_mul_pr(frac_S2, ctab1_S2));
506 frcoul_S0 = gmx_mul_pr(qq_S0, gmx_sub_pr(rinv_ex_S0, gmx_mul_pr(fsub_S0, r_S0)));
507 frcoul_S2 = gmx_mul_pr(qq_S2, gmx_sub_pr(rinv_ex_S2, gmx_mul_pr(fsub_S2, r_S2)));
510 vc_sub_S0 = gmx_add_pr(ctabv_S0, gmx_mul_pr(gmx_mul_pr(mhalfsp_S, frac_S0), gmx_add_pr(ctab0_S0, fsub_S0)));
511 vc_sub_S2 = gmx_add_pr(ctabv_S2, gmx_mul_pr(gmx_mul_pr(mhalfsp_S, frac_S2), gmx_add_pr(ctab0_S2, fsub_S2)));
513 #endif /* CALC_COUL_TAB */
/* Common Ewald/table energy epilogue: apply the potential shift (only to
 * non-excluded pairs when exclusion forces are computed), then form
 * vcoul = qq*(1/r - vc_sub) and zero it outside the cut-off/diagonal. */
515 #if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
516 #ifndef NO_SHIFT_EWALD
517 /* Add Ewald potential shift to vc_sub for convenience */
519 vc_sub_S0 = gmx_add_pr(vc_sub_S0, gmx_blendzero_pr(sh_ewald_S, interact_S0));
520 vc_sub_S2 = gmx_add_pr(vc_sub_S2, gmx_blendzero_pr(sh_ewald_S, interact_S2));
522 vc_sub_S0 = gmx_add_pr(vc_sub_S0, sh_ewald_S);
523 vc_sub_S2 = gmx_add_pr(vc_sub_S2, sh_ewald_S);
527 vcoul_S0 = gmx_mul_pr(qq_S0, gmx_sub_pr(rinv_ex_S0, vc_sub_S0));
528 vcoul_S2 = gmx_mul_pr(qq_S2, gmx_sub_pr(rinv_ex_S2, vc_sub_S2));
532 /* Mask energy for cut-off and diagonal */
533 vcoul_S0 = gmx_blendzero_pr(vcoul_S0, wco_S0);
534 vcoul_S2 = gmx_blendzero_pr(vcoul_S2, wco_S2);
537 #endif /* CALC_COULOMB */
540 /* Lennard-Jones interaction */
/* Optional separate VdW cut-off; otherwise the Coulomb cut-off mask is
 * reused via the #defines below. */
542 #ifdef VDW_CUTOFF_CHECK
543 wco_vdw_S0 = gmx_cmplt_pr(rsq_S0, rcvdw2_S);
545 wco_vdw_S2 = gmx_cmplt_pr(rsq_S2, rcvdw2_S);
548 /* Same cut-off for Coulomb and VdW, reuse the registers */
549 #define wco_vdw_S0 wco_S0
550 #define wco_vdw_S2 wco_S2
/* --- C6/C12 path: FrLJ6 = C6/r^6, FrLJ12 = C12/r^12 (force*r form);
 * r^-6 is zeroed for excluded pairs and outside the VdW cut-off. */
554 rinvsix_S0 = gmx_mul_pr(rinvsq_S0, gmx_mul_pr(rinvsq_S0, rinvsq_S0));
556 rinvsix_S0 = gmx_blendzero_pr(rinvsix_S0, interact_S0);
559 rinvsix_S2 = gmx_mul_pr(rinvsq_S2, gmx_mul_pr(rinvsq_S2, rinvsq_S2));
561 rinvsix_S2 = gmx_blendzero_pr(rinvsix_S2, interact_S2);
564 #ifdef VDW_CUTOFF_CHECK
565 rinvsix_S0 = gmx_blendzero_pr(rinvsix_S0, wco_vdw_S0);
567 rinvsix_S2 = gmx_blendzero_pr(rinvsix_S2, wco_vdw_S2);
570 FrLJ6_S0 = gmx_mul_pr(c6_S0, rinvsix_S0);
572 FrLJ6_S2 = gmx_mul_pr(c6_S2, rinvsix_S2);
574 FrLJ12_S0 = gmx_mul_pr(c12_S0, gmx_mul_pr(rinvsix_S0, rinvsix_S0));
576 FrLJ12_S2 = gmx_mul_pr(c12_S2, gmx_mul_pr(rinvsix_S2, rinvsix_S2));
578 #endif /* not LJ_COMB_LB */
/* --- Lorentz-Berthelot path: work in sir = sigma/r, so
 * FrLJ6 = eps*(sigma/r)^6 and FrLJ12 = FrLJ6*(sigma/r)^6. */
581 sir_S0 = gmx_mul_pr(sig_S0, rinv_S0);
583 sir_S2 = gmx_mul_pr(sig_S2, rinv_S2);
585 sir2_S0 = gmx_mul_pr(sir_S0, sir_S0);
587 sir2_S2 = gmx_mul_pr(sir_S2, sir_S2);
589 sir6_S0 = gmx_mul_pr(sir2_S0, gmx_mul_pr(sir2_S0, sir2_S0));
591 sir6_S0 = gmx_blendzero_pr(sir6_S0, interact_S0);
594 sir6_S2 = gmx_mul_pr(sir2_S2, gmx_mul_pr(sir2_S2, sir2_S2));
596 sir6_S2 = gmx_blendzero_pr(sir6_S2, interact_S2);
599 #ifdef VDW_CUTOFF_CHECK
600 sir6_S0 = gmx_blendzero_pr(sir6_S0, wco_vdw_S0);
602 sir6_S2 = gmx_blendzero_pr(sir6_S2, wco_vdw_S2);
605 FrLJ6_S0 = gmx_mul_pr(eps_S0, sir6_S0);
607 FrLJ6_S2 = gmx_mul_pr(eps_S2, sir6_S2);
609 FrLJ12_S0 = gmx_mul_pr(FrLJ6_S0, sir6_S0);
611 FrLJ12_S2 = gmx_mul_pr(FrLJ6_S2, sir6_S2);
613 #if defined CALC_ENERGIES
614 /* We need C6 and C12 to calculate the LJ potential shift */
/* Reconstruct C6 = 4*eps*sigma^6-style coefficients from sigma/eps so the
 * shifted-potential terms below can use them (exact prefactor convention
 * not visible here -- matches whatever sh_invrc6 expects). */
615 sig2_S0 = gmx_mul_pr(sig_S0, sig_S0);
617 sig2_S2 = gmx_mul_pr(sig_S2, sig_S2);
619 sig6_S0 = gmx_mul_pr(sig2_S0, gmx_mul_pr(sig2_S0, sig2_S0));
621 sig6_S2 = gmx_mul_pr(sig2_S2, gmx_mul_pr(sig2_S2, sig2_S2));
623 c6_S0 = gmx_mul_pr(eps_S0, sig6_S0);
625 c6_S2 = gmx_mul_pr(eps_S2, sig6_S2);
627 c12_S0 = gmx_mul_pr(c6_S0, sig6_S0);
629 c12_S2 = gmx_mul_pr(c6_S2, sig6_S2);
632 #endif /* LJ_COMB_LB */
638 /* Extract the group pair index per j pair.
639 * Energy groups are stored per i-cluster, so things get
640 * complicated when the i- and j-cluster size don't match.
/* Two layouts: one packed int per j-cluster half (first branch), or one
 * packed int per i-cluster-sized sub-block of the j-cluster (loop). The
 * guarding #if and braces are not visible in this excerpt. */
645 egps_j = nbat->energrp[cj>>1];
646 egp_jj[0] = ((egps_j >> ((cj & 1)*egps_jshift)) & egps_jmask)*egps_jstride;
648 /* We assume UNROLLI <= UNROLLJ */
650 for (jdi = 0; jdi < UNROLLJ/UNROLLI; jdi++)
653 egps_j = nbat->energrp[cj*(UNROLLJ/UNROLLI)+jdi];
654 for (jj = 0; jj < (UNROLLI/2); jj++)
656 egp_jj[jdi*(UNROLLI/2)+jj] = ((egps_j >> (jj*egps_jshift)) & egps_jmask)*egps_jstride;
/* Accumulate Coulomb energy: either into the single total, or scattered
 * into per-energy-group bins (two half-register halves per call). */
664 #ifndef ENERGY_GROUPS
665 vctot_S = gmx_add_pr(vctot_S, gmx_add_pr(vcoul_S0, vcoul_S2));
667 add_ener_grp_halves(vcoul_S0, vctp[0], vctp[1], egp_jj);
668 add_ener_grp_halves(vcoul_S2, vctp[2], vctp[3], egp_jj);
673 /* Calculate the LJ energies */
/* Shifted potential: V = (FrLJ - C*shift)/n with n = 6 or 12 */
674 VLJ6_S0 = gmx_mul_pr(sixth_S, gmx_sub_pr(FrLJ6_S0, gmx_mul_pr(c6_S0, sh_invrc6_S)));
676 VLJ6_S2 = gmx_mul_pr(sixth_S, gmx_sub_pr(FrLJ6_S2, gmx_mul_pr(c6_S2, sh_invrc6_S)));
678 VLJ12_S0 = gmx_mul_pr(twelveth_S, gmx_sub_pr(FrLJ12_S0, gmx_mul_pr(c12_S0, sh_invrc12_S)));
680 VLJ12_S2 = gmx_mul_pr(twelveth_S, gmx_sub_pr(FrLJ12_S2, gmx_mul_pr(c12_S2, sh_invrc12_S)));
683 VLJ_S0 = gmx_sub_pr(VLJ12_S0, VLJ6_S0);
685 VLJ_S2 = gmx_sub_pr(VLJ12_S2, VLJ6_S2);
687 /* The potential shift should be removed for pairs beyond cut-off */
688 VLJ_S0 = gmx_blendzero_pr(VLJ_S0, wco_vdw_S0);
690 VLJ_S2 = gmx_blendzero_pr(VLJ_S2, wco_vdw_S2);
693 /* The potential shift should be removed for excluded pairs */
694 VLJ_S0 = gmx_blendzero_pr(VLJ_S0, interact_S0);
696 VLJ_S2 = gmx_blendzero_pr(VLJ_S2, interact_S2);
/* Accumulate LJ energy, total or per energy group */
699 #ifndef ENERGY_GROUPS
700 Vvdwtot_S = gmx_add_pr(Vvdwtot_S,
702 gmx_add_pr(VLJ_S0, VLJ_S2)
708 add_ener_grp_halves(VLJ_S0, vvdwtp[0], vvdwtp[1], egp_jj);
710 add_ener_grp_halves(VLJ_S2, vvdwtp[2], vvdwtp[3], egp_jj);
714 #endif /* CALC_ENERGIES */
/* Scalar force: frcoul and FrLJ* hold force*r, so multiplying by rinvsq
 * gives F/r; that makes the per-component force simply fscal*d{x,y,z}.
 * rinvsq is already zeroed beyond the cut-off (masking done earlier). */
717 fscal_S0 = gmx_mul_pr(rinvsq_S0,
719 gmx_add_pr(frcoul_S0,
723 gmx_sub_pr(FrLJ12_S0, FrLJ6_S0)));
725 fscal_S0 = gmx_mul_pr(rinvsq_S0, frcoul_S0);
/* With HALF_LJ only the first two i atoms have LJ parameters */
727 #if defined CALC_LJ && !defined HALF_LJ
728 fscal_S2 = gmx_mul_pr(rinvsq_S2,
730 gmx_add_pr(frcoul_S2,
734 gmx_sub_pr(FrLJ12_S2, FrLJ6_S2)));
736 /* Atom 2 and 3 don't have LJ, so only add Coulomb forces */
737 fscal_S2 = gmx_mul_pr(rinvsq_S2, frcoul_S2);
740 /* Calculate temporary vectorial force */
741 tx_S0 = gmx_mul_pr(fscal_S0, dx_S0);
742 tx_S2 = gmx_mul_pr(fscal_S2, dx_S2);
743 ty_S0 = gmx_mul_pr(fscal_S0, dy_S0);
744 ty_S2 = gmx_mul_pr(fscal_S2, dy_S2);
745 tz_S0 = gmx_mul_pr(fscal_S0, dz_S0);
746 tz_S2 = gmx_mul_pr(fscal_S2, dz_S2);
748 /* Increment i atom force */
749 fix_S0 = gmx_add_pr(fix_S0, tx_S0);
750 fix_S2 = gmx_add_pr(fix_S2, tx_S2);
751 fiy_S0 = gmx_add_pr(fiy_S0, ty_S0);
752 fiy_S2 = gmx_add_pr(fiy_S2, ty_S2);
753 fiz_S0 = gmx_add_pr(fiz_S0, tz_S0);
754 fiz_S2 = gmx_add_pr(fiz_S2, tz_S2);
756 /* Decrement j atom force */
/* Newton's third law: j atoms get minus the sum of all i contributions.
 * gmx_sum4_hpr presumably reduces the two halves of both full-width
 * registers into one half-width (UNROLLJ-wide) vector -- confirm against
 * the SIMD macro definitions. */
757 gmx_load_hpr(&fjx_S, f+ajx);
758 gmx_load_hpr(&fjy_S, f+ajy);
759 gmx_load_hpr(&fjz_S, f+ajz);
760 gmx_store_hpr(f+ajx, gmx_sub_hpr(fjx_S, gmx_sum4_hpr(tx_S0, tx_S2)));
761 gmx_store_hpr(f+ajy, gmx_sub_hpr(fjy_S, gmx_sum4_hpr(ty_S0, ty_S2)));
762 gmx_store_hpr(f+ajz, gmx_sub_hpr(fjz_S, gmx_sum4_hpr(tz_S0, tz_S2)));