/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2009, The GROMACS Development Team
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/* This is the innermost loop contents for the 4 x N atom SIMD kernel.
 * This flavor of the kernel duplicates the data for N j-particles in
 * 2xN wide SIMD registers to operate on 2 i-particles at once.
 * This leads to 4/2=2 sets of most instructions. Therefore we call
 * this kernel 2x(N+N) = 2xnn.
 *
 * This 2xnn kernel is basically the 4xn equivalent with half the registers
 * and instructions removed.
 *
 * An alternative would be to load two different clusters of N j-particles
 * into SIMD registers, giving a 4x(N+N) kernel. This doubles the amount
 * of instructions, which could lead to better scheduling. But we actually
 * observed worse scheduling for the AVX-256 4x8 normal analytical PME
 * kernel, which has a lower pair throughput than 2x(4+4) with gcc 4.7.
 * It could be worth trying this option, but it takes some more effort.
 */
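/* As a concrete illustration (assuming 8-wide AVX-256 single precision,
 * i.e. N=4): gmx_loaddh_pr loads the 4 j-values once and duplicates them
 * into both register halves, e.g. jx_S = [ xj0 xj1 xj2 xj3 | xj0 xj1 xj2 xj3 ].
 * The i-data with suffix _S0 holds i-atoms 0 and 1, one broadcast per half,
 * e.g. ix_S0 = [ xi0 xi0 xi0 xi0 | xi1 xi1 xi1 xi1 ], and the _S2 registers
 * hold i-atoms 2 and 3 the same way. One full-width operation thus covers
 * 2 i-atoms x N j-atoms, which is why only the S0 and S2 register sets of
 * the 4xn kernel remain here.
 */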
/* When calculating RF or Ewald interactions we calculate the electrostatic
 * forces on excluded atom pairs here in the non-bonded loops.
 * But when energies and/or virial is required we calculate them
 * separately, as it is then easier to separate the energy and virial
 * contributions.
 */
#if defined CHECK_EXCLS && defined CALC_COULOMB
/* Without exclusions and energies we only need to mask the cut-off,
 * which can be faster with blendv.
 */
#if !(defined CHECK_EXCLS || defined CALC_ENERGIES) && defined GMX_HAVE_SIMD_BLENDV && !defined COUNT_PAIRS
/* With RF and tabulated Coulomb we replace cmp+and with sub+blendv.
 * With gcc this is slower, except for RF on Sandy Bridge.
 * Tested with gcc 4.6.2, 4.6.3 and 4.7.1.
 */
#if (defined CALC_COUL_RF || defined CALC_COUL_TAB) && (!defined __GNUC__ || (defined CALC_COUL_RF && defined GMX_X86_AVX_256))
/* With analytical Ewald we replace cmp+and+and with sub+blendv+blendv.
 * This is only faster with icc on Sandy Bridge (PS kernel slower than gcc 4.7).
 */
#if defined CALC_COUL_EWALD && defined __INTEL_COMPILER && defined GMX_X86_AVX_256
int cj, aj, ajx, ajy, ajz;
/* Energy group indices for two atoms packed into one int */
int egp_jj[UNROLLJ/2];
/* Interaction (non-exclusion) mask of all 1's or 0's */
gmx_mm_pr jx_S, jy_S, jz_S;
gmx_mm_pr dx_S0, dy_S0, dz_S0;
gmx_mm_pr dx_S2, dy_S2, dz_S2;
gmx_mm_pr tx_S0, ty_S0, tz_S0;
gmx_mm_pr tx_S2, ty_S2, tz_S2;
gmx_mm_pr rsq_S0, rinv_S0, rinvsq_S0;
gmx_mm_pr rsq_S2, rinv_S2, rinvsq_S2;
#ifndef CUTOFF_BLENDV
/* wco: within cut-off, mask of all 1's or 0's */
#ifdef VDW_CUTOFF_CHECK
gmx_mm_pr wco_vdw_S0;
gmx_mm_pr wco_vdw_S2;
/* 1/r masked with the interaction mask */
gmx_mm_pr rinv_ex_S0;
gmx_mm_pr rinv_ex_S2;
/* The force (PME mesh force) we need to subtract from 1/r^2 */
#ifdef CALC_COUL_EWALD
gmx_mm_pr brsq_S0, brsq_S2;
gmx_mm_pr ewcorr_S0, ewcorr_S2;
/* frcoul = (1/r - fsub)*r */
/* For tables: r, rs=r/sp, rf=floor(rs), frac=rs-rf */
gmx_mm_pr r_S0, rs_S0, rf_S0, frac_S0;
gmx_mm_pr r_S2, rs_S2, rf_S2, frac_S2;
/* Table index: rs truncated to an int */
gmx_epi32 ti_S0, ti_S2;
/* Linear force table values */
gmx_mm_pr ctab0_S0, ctab1_S0;
gmx_mm_pr ctab0_S2, ctab1_S2;
/* Quadratic energy table value */
#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
/* The potential (PME mesh) we need to subtract from 1/r */
/* Electrostatic potential */
/* The force times 1/r */
/* LJ sigma_j/2 and sqrt(epsilon_j) */
gmx_mm_pr hsig_j_S, seps_j_S;
/* LJ sigma_ij and epsilon_ij */
gmx_mm_pr sig_S0, eps_S0;
gmx_mm_pr sig_S2, eps_S2;
gmx_mm_pr sig2_S0, sig6_S0;
gmx_mm_pr sig2_S2, sig6_S2;
#endif /* LJ_COMB_LB */
gmx_mm_pr c6s_j_S, c12s_j_S;
#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
/* Index for loading LJ parameters, complicated when interleaving */
/* LJ C6 and C12 parameters, used with geometric comb. rule */
gmx_mm_pr c6_S0, c12_S0;
gmx_mm_pr c6_S2, c12_S2;
/* Intermediate variables for LJ calculation */
gmx_mm_pr rinvsix_S0;
gmx_mm_pr rinvsix_S2;
gmx_mm_pr sir_S0, sir2_S0, sir6_S0;
gmx_mm_pr sir_S2, sir2_S2, sir6_S2;
gmx_mm_pr FrLJ6_S0, FrLJ12_S0;
gmx_mm_pr FrLJ6_S2, FrLJ12_S2;
gmx_mm_pr VLJ6_S0, VLJ12_S0, VLJ_S0;
gmx_mm_pr VLJ6_S2, VLJ12_S2, VLJ_S2;
gmx_mm_hpr fjx_S, fjy_S, fjz_S;
/* j-cluster index */
/* Atom indices (of the first atom in the cluster) */
#if defined CALC_LJ && (defined LJ_COMB_GEOM || defined LJ_COMB_LB)
#if UNROLLJ == STRIDE
aj2 = (cj>>1)*2*STRIDE + (cj & 1)*UNROLLJ;
#if UNROLLJ == STRIDE
ajx = (cj>>1)*DIM*STRIDE + (cj & 1)*UNROLLJ;
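/* In these index expressions cj>>1 selects the pack of two j-clusters that
 * are stored together (2*STRIDE reals per LJ parameter pack, DIM*STRIDE
 * reals of coordinates), and (cj & 1)*UNROLLJ offsets to the first or
 * second j-cluster within that pack. This interleaved layout is what the
 * "complicated when interleaving" comment above refers to, and is
 * presumably used when STRIDE differs from UNROLLJ.
 */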
/* Load integer interaction mask */
gmx_mm_pr mask_pr_S = gmx_castsi_pr(gmx_set1_epi32(l_cj[cjind].excl));
int_S0 = gmx_checkbitmask_pr(mask_pr_S, mask_S0);
int_S2 = gmx_checkbitmask_pr(mask_pr_S, mask_S2);
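/* Each bit of l_cj[cjind].excl flags one i-j pair of this i-/j-cluster
 * combination as interacting (1) or excluded (0). The broadcast integer is
 * tested against the per-lane bit patterns mask_S0 and mask_S2, giving
 * full-width 0/all-ones masks for the i-atom pairs (0,1) and (2,3).
 */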
/* Load j atom coordinates */
gmx_loaddh_pr(jx_S, x+ajx);
gmx_loaddh_pr(jy_S, x+ajy);
gmx_loaddh_pr(jz_S, x+ajz);
/* Calculate distance */
dx_S0 = gmx_sub_pr(ix_S0, jx_S);
dy_S0 = gmx_sub_pr(iy_S0, jy_S);
dz_S0 = gmx_sub_pr(iz_S0, jz_S);
dx_S2 = gmx_sub_pr(ix_S2, jx_S);
dy_S2 = gmx_sub_pr(iy_S2, jy_S);
dz_S2 = gmx_sub_pr(iz_S2, jz_S);
/* rsq = dx*dx+dy*dy+dz*dz */
rsq_S0 = gmx_calc_rsq_pr(dx_S0, dy_S0, dz_S0);
rsq_S2 = gmx_calc_rsq_pr(dx_S2, dy_S2, dz_S2);
#ifndef CUTOFF_BLENDV
wco_S0 = gmx_cmplt_pr(rsq_S0, rc2_S);
wco_S2 = gmx_cmplt_pr(rsq_S2, rc2_S);
/* Only remove the (sub-)diagonal to avoid double counting */
#if UNROLLJ == UNROLLI
wco_S0 = gmx_and_pr(wco_S0, diag_S0);
wco_S2 = gmx_and_pr(wco_S2, diag_S2);
#if UNROLLJ == 2*UNROLLI
wco_S0 = gmx_and_pr(wco_S0, diag0_S0);
wco_S2 = gmx_and_pr(wco_S2, diag0_S2);
else if (cj*2 + 1 == ci_sh)
wco_S0 = gmx_and_pr(wco_S0, diag1_S0);
wco_S2 = gmx_and_pr(wco_S2, diag1_S2);
#error "only UNROLLJ == UNROLLI*(1 or 2) currently supported in 2xnn kernels"
#else /* EXCL_FORCES */
/* No exclusion forces: remove all excluded atom pairs from the list */
wco_S0 = gmx_and_pr(wco_S0, int_S0);
wco_S2 = gmx_and_pr(wco_S2, int_S2);
real tmpa[2*GMX_SIMD_WIDTH_HERE], *tmp;
tmp = gmx_simd_align_real(tmpa);
for (i = 0; i < UNROLLI; i+=2)
gmx_store_pr(tmp, i == 0 ? wco_S0 : wco_S2);
for (j = 0; j < 2*UNROLLJ; j++)
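/* Pair counting for debugging/statistics (this block is presumably guarded
 * by #ifdef COUNT_PAIRS, which is also checked above): the within-cut-off
 * masks are written to an aligned temporary buffer and the non-zero
 * entries, i.e. the pairs inside the cut-off, are counted.
 */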
/* For excluded pairs add a small number to avoid r^-6 = NaN */
rsq_S0 = gmx_add_pr(rsq_S0, gmx_andnot_pr(int_S0, avoid_sing_S));
rsq_S2 = gmx_add_pr(rsq_S2, gmx_andnot_pr(int_S2, avoid_sing_S));
rinv_S0 = gmx_invsqrt_pr(rsq_S0);
rinv_S2 = gmx_invsqrt_pr(rsq_S2);
/* Load parameters for j atom */
gmx_loaddh_pr(jq_S, q+aj);
qq_S0 = gmx_mul_pr(iq_S0, jq_S);
qq_S2 = gmx_mul_pr(iq_S2, jq_S);
#if !defined LJ_COMB_GEOM && !defined LJ_COMB_LB && !defined FIX_LJ_C
load_lj_pair_params2(nbfp0, nbfp1, type, aj, c6_S0, c12_S0);
load_lj_pair_params2(nbfp2, nbfp3, type, aj, c6_S2, c12_S2);
#endif /* not defined any LJ rule */
gmx_loaddh_pr(c6s_j_S, ljc+aj2+0);
gmx_loaddh_pr(c12s_j_S, ljc+aj2+STRIDE);
c6_S0 = gmx_mul_pr(c6s_S0, c6s_j_S);
c6_S2 = gmx_mul_pr(c6s_S2, c6s_j_S);
c12_S0 = gmx_mul_pr(c12s_S0, c12s_j_S);
c12_S2 = gmx_mul_pr(c12s_S2, c12s_j_S);
#endif /* LJ_COMB_GEOM */
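/* Geometric combination rule: assuming ljc stores sqrt(C6) and sqrt(C12)
 * per atom (analogous to the sqrt(epsilon) storage noted for LJ_COMB_LB in
 * the declarations), the products formed above give
 * C6_ij = sqrt(C6_i)*sqrt(C6_j) and C12_ij = sqrt(C12_i)*sqrt(C12_j).
 */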
gmx_loaddh_pr(hsig_j_S, ljc+aj2+0);
gmx_loaddh_pr(seps_j_S, ljc+aj2+STRIDE);
sig_S0 = gmx_add_pr(hsig_i_S0, hsig_j_S);
eps_S0 = gmx_mul_pr(seps_i_S0, seps_j_S);
sig_S2 = gmx_add_pr(hsig_i_S2, hsig_j_S);
eps_S2 = gmx_mul_pr(seps_i_S2, seps_j_S);
#endif /* LJ_COMB_LB */
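/* Lorentz-Berthelot combination rule: sigma_ij = (sigma_i + sigma_j)/2 is
 * obtained above by adding the stored half-sigmas, and
 * epsilon_ij = sqrt(epsilon_i*epsilon_j) by multiplying the stored
 * sqrt(epsilon) values (see the hsig/seps declarations above).
 */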
#ifndef CUTOFF_BLENDV
rinv_S0 = gmx_blendzero_pr(rinv_S0, wco_S0);
rinv_S2 = gmx_blendzero_pr(rinv_S2, wco_S2);
/* We only need to mask for the cut-off: blendv is faster */
rinv_S0 = gmx_blendv_pr(rinv_S0, zero_S, gmx_sub_pr(rc2_S, rsq_S0));
rinv_S2 = gmx_blendv_pr(rinv_S2, zero_S, gmx_sub_pr(rc2_S, rsq_S2));
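/* In the blendv variant the sign of rc2 - rsq does the masking: it is
 * negative only beyond the cut-off, so those lanes of 1/r are replaced by
 * zero, giving the same result as the cmp+and masking above.
 */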
rinvsq_S0 = gmx_mul_pr(rinv_S0, rinv_S0);
rinvsq_S2 = gmx_mul_pr(rinv_S2, rinv_S2);
/* Note that here we calculate force*r, not the usual force/r.
 * This allows avoiding masking the reaction-field contribution,
 * as frcoul is later multiplied by rinvsq which has been
 * masked with the cut-off check.
 */
/* Only add 1/r for non-excluded atom pairs */
rinv_ex_S0 = gmx_blendzero_pr(rinv_S0, int_S0);
rinv_ex_S2 = gmx_blendzero_pr(rinv_S2, int_S2);
/* No exclusion forces, we always need 1/r */
#define rinv_ex_S0 rinv_S0
#define rinv_ex_S2 rinv_S2
/* Electrostatic interactions */
frcoul_S0 = gmx_mul_pr(qq_S0, gmx_add_pr(rinv_ex_S0, gmx_mul_pr(rsq_S0, mrc_3_S)));
frcoul_S2 = gmx_mul_pr(qq_S2, gmx_add_pr(rinv_ex_S2, gmx_mul_pr(rsq_S2, mrc_3_S)));
vcoul_S0 = gmx_mul_pr(qq_S0, gmx_add_pr(rinv_ex_S0, gmx_add_pr(gmx_mul_pr(rsq_S0, hrc_3_S), moh_rc_S)));
vcoul_S2 = gmx_mul_pr(qq_S2, gmx_add_pr(rinv_ex_S2, gmx_add_pr(gmx_mul_pr(rsq_S2, hrc_3_S), moh_rc_S)));
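/* Worked out: for reaction field frcoul = qq*(1/r + mrc_3*r^2) is force*r,
 * so the final fscal = frcoul*rinvsq = qq*(1/r^3 + mrc_3). Because rinvsq
 * was already zeroed beyond the cut-off, the constant RF term needs no
 * extra masking, which is the point of the comment above.
 */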
#ifdef CALC_COUL_EWALD
/* We need to mask (or limit) rsq for the cut-off,
 * as large distances can cause an overflow in gmx_pmecorrF/V.
 */
#ifndef CUTOFF_BLENDV
brsq_S0 = gmx_mul_pr(beta2_S, gmx_blendzero_pr(rsq_S0, wco_S0));
brsq_S2 = gmx_mul_pr(beta2_S, gmx_blendzero_pr(rsq_S2, wco_S2));
/* Strangely, putting mul on a separate line is slower (icc 13) */
brsq_S0 = gmx_mul_pr(beta2_S, gmx_blendv_pr(rsq_S0, zero_S, gmx_sub_pr(rc2_S, rsq_S0)));
brsq_S2 = gmx_mul_pr(beta2_S, gmx_blendv_pr(rsq_S2, zero_S, gmx_sub_pr(rc2_S, rsq_S2)));
ewcorr_S0 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_S0), beta_S);
ewcorr_S2 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_S2), beta_S);
frcoul_S0 = gmx_mul_pr(qq_S0, gmx_add_pr(rinv_ex_S0, gmx_mul_pr(ewcorr_S0, brsq_S0)));
frcoul_S2 = gmx_mul_pr(qq_S2, gmx_add_pr(rinv_ex_S2, gmx_mul_pr(ewcorr_S2, brsq_S2)));
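/* With brsq = (beta*r)^2 and ewcorr = beta*pmecorrF(brsq), the scalar force
 * works out to fscal = frcoul*rinvsq = qq*(1/r^3 + beta^3*pmecorrF(brsq)),
 * i.e. gmx_pmecorrF_pr supplies the analytical Ewald mesh-force correction
 * as a function of (beta*r)^2 (this is an interpretation of the code here,
 * not a statement about the approximation used internally).
 */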
vc_sub_S0 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_S0), beta_S);
vc_sub_S2 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_S2), beta_S);
#endif /* CALC_COUL_EWALD */
/* Electrostatic interactions */
r_S0 = gmx_mul_pr(rsq_S0, rinv_S0);
r_S2 = gmx_mul_pr(rsq_S2, rinv_S2);
/* Convert r to scaled table units */
rs_S0 = gmx_mul_pr(r_S0, invtsp_S);
rs_S2 = gmx_mul_pr(r_S2, invtsp_S);
/* Truncate scaled r to an int */
ti_S0 = gmx_cvttpr_epi32(rs_S0);
ti_S2 = gmx_cvttpr_epi32(rs_S2);
#ifdef GMX_HAVE_SIMD_FLOOR
rf_S0 = gmx_floor_pr(rs_S0);
rf_S2 = gmx_floor_pr(rs_S2);
rf_S0 = gmx_cvtepi32_pr(ti_S0);
rf_S2 = gmx_cvtepi32_pr(ti_S2);
frac_S0 = gmx_sub_pr(rs_S0, rf_S0);
frac_S2 = gmx_sub_pr(rs_S2, rf_S2);
/* Load and interpolate table forces and possibly energies.
 * Force and energy can be combined in one table, stride 4: FDV0,
 * or in two separate tables with stride 1: F and V.
 * Currently single precision uses FDV0, double F and V.
 */
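/* In the FDV0 layout each table point presumably packs four reals
 * {F[i], F[i+1]-F[i], V[i], 0}, so a single aligned load per point yields
 * the force, the force difference used for linear interpolation (ctab1
 * below) and the energy, at the cost of some padding.
 */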
#ifndef CALC_ENERGIES
load_table_f(tab_coul_F, ti_S0, ti0, ctab0_S0, ctab1_S0);
load_table_f(tab_coul_F, ti_S2, ti2, ctab0_S2, ctab1_S2);
load_table_f_v(tab_coul_F, ti_S0, ti0, ctab0_S0, ctab1_S0, ctabv_S0);
load_table_f_v(tab_coul_F, ti_S2, ti2, ctab0_S2, ctab1_S2, ctabv_S2);
load_table_f_v(tab_coul_F, tab_coul_V, ti_S0, ti0, ctab0_S0, ctab1_S0, ctabv_S0);
load_table_f_v(tab_coul_F, tab_coul_V, ti_S2, ti2, ctab0_S2, ctab1_S2, ctabv_S2);
fsub_S0 = gmx_add_pr(ctab0_S0, gmx_mul_pr(frac_S0, ctab1_S0));
fsub_S2 = gmx_add_pr(ctab0_S2, gmx_mul_pr(frac_S2, ctab1_S2));
frcoul_S0 = gmx_mul_pr(qq_S0, gmx_sub_pr(rinv_ex_S0, gmx_mul_pr(fsub_S0, r_S0)));
frcoul_S2 = gmx_mul_pr(qq_S2, gmx_sub_pr(rinv_ex_S2, gmx_mul_pr(fsub_S2, r_S2)));
vc_sub_S0 = gmx_add_pr(ctabv_S0, gmx_mul_pr(gmx_mul_pr(mhalfsp_S, frac_S0), gmx_add_pr(ctab0_S0, fsub_S0)));
vc_sub_S2 = gmx_add_pr(ctabv_S2, gmx_mul_pr(gmx_mul_pr(mhalfsp_S, frac_S2), gmx_add_pr(ctab0_S2, fsub_S2)));
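/* The energy is obtained by trapezoidal integration of the force table over
 * the fractional bin: vc_sub = V[i] - (sp/2)*frac*(F[i] + F(r)), with
 * mhalfsp presumably holding minus half the table spacing sp; this is the
 * "quadratic energy table value" mentioned in the declarations.
 */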
#endif /* CALC_COUL_TAB */
#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
#ifndef NO_SHIFT_EWALD
/* Add Ewald potential shift to vc_sub for convenience */
vc_sub_S0 = gmx_add_pr(vc_sub_S0, gmx_blendzero_pr(sh_ewald_S, int_S0));
vc_sub_S2 = gmx_add_pr(vc_sub_S2, gmx_blendzero_pr(sh_ewald_S, int_S2));
vc_sub_S0 = gmx_add_pr(vc_sub_S0, sh_ewald_S);
vc_sub_S2 = gmx_add_pr(vc_sub_S2, sh_ewald_S);
vcoul_S0 = gmx_mul_pr(qq_S0, gmx_sub_pr(rinv_ex_S0, vc_sub_S0));
vcoul_S2 = gmx_mul_pr(qq_S2, gmx_sub_pr(rinv_ex_S2, vc_sub_S2));
/* Mask energy for cut-off and diagonal */
vcoul_S0 = gmx_blendzero_pr(vcoul_S0, wco_S0);
vcoul_S2 = gmx_blendzero_pr(vcoul_S2, wco_S2);
#endif /* CALC_COULOMB */
/* Lennard-Jones interaction */
#ifdef VDW_CUTOFF_CHECK
wco_vdw_S0 = gmx_cmplt_pr(rsq_S0, rcvdw2_S);
wco_vdw_S2 = gmx_cmplt_pr(rsq_S2, rcvdw2_S);
/* Same cut-off for Coulomb and VdW, reuse the registers */
#define wco_vdw_S0 wco_S0
#define wco_vdw_S2 wco_S2
rinvsix_S0 = gmx_mul_pr(rinvsq_S0, gmx_mul_pr(rinvsq_S0, rinvsq_S0));
rinvsix_S0 = gmx_blendzero_pr(rinvsix_S0, int_S0);
rinvsix_S2 = gmx_mul_pr(rinvsq_S2, gmx_mul_pr(rinvsq_S2, rinvsq_S2));
rinvsix_S2 = gmx_blendzero_pr(rinvsix_S2, int_S2);
#ifdef VDW_CUTOFF_CHECK
rinvsix_S0 = gmx_blendzero_pr(rinvsix_S0, wco_vdw_S0);
rinvsix_S2 = gmx_blendzero_pr(rinvsix_S2, wco_vdw_S2);
FrLJ6_S0 = gmx_mul_pr(c6_S0, rinvsix_S0);
FrLJ6_S2 = gmx_mul_pr(c6_S2, rinvsix_S2);
FrLJ12_S0 = gmx_mul_pr(c12_S0, gmx_mul_pr(rinvsix_S0, rinvsix_S0));
FrLJ12_S2 = gmx_mul_pr(c12_S2, gmx_mul_pr(rinvsix_S2, rinvsix_S2));
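/* FrLJ6 = c6/r^6 and FrLJ12 = c12/r^12 enter the scalar force below simply
 * as FrLJ12 - FrLJ6, while the energies divide them by 6 and 12; the stored
 * c6/c12 therefore appear to already include the usual factors 6 and 12 of
 * the Lennard-Jones force.
 */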
#endif /* not LJ_COMB_LB */
sir_S0 = gmx_mul_pr(sig_S0, rinv_S0);
sir_S2 = gmx_mul_pr(sig_S2, rinv_S2);
sir2_S0 = gmx_mul_pr(sir_S0, sir_S0);
sir2_S2 = gmx_mul_pr(sir_S2, sir_S2);
sir6_S0 = gmx_mul_pr(sir2_S0, gmx_mul_pr(sir2_S0, sir2_S0));
sir6_S0 = gmx_blendzero_pr(sir6_S0, int_S0);
sir6_S2 = gmx_mul_pr(sir2_S2, gmx_mul_pr(sir2_S2, sir2_S2));
sir6_S2 = gmx_blendzero_pr(sir6_S2, int_S2);
#ifdef VDW_CUTOFF_CHECK
sir6_S0 = gmx_blendzero_pr(sir6_S0, wco_vdw_S0);
sir6_S2 = gmx_blendzero_pr(sir6_S2, wco_vdw_S2);
FrLJ6_S0 = gmx_mul_pr(eps_S0, sir6_S0);
FrLJ6_S2 = gmx_mul_pr(eps_S2, sir6_S2);
FrLJ12_S0 = gmx_mul_pr(FrLJ6_S0, sir6_S0);
FrLJ12_S2 = gmx_mul_pr(FrLJ6_S2, sir6_S2);
#if defined CALC_ENERGIES
/* We need C6 and C12 to calculate the LJ potential shift */
sig2_S0 = gmx_mul_pr(sig_S0, sig_S0);
sig2_S2 = gmx_mul_pr(sig_S2, sig_S2);
sig6_S0 = gmx_mul_pr(sig2_S0, gmx_mul_pr(sig2_S0, sig2_S0));
sig6_S2 = gmx_mul_pr(sig2_S2, gmx_mul_pr(sig2_S2, sig2_S2));
c6_S0 = gmx_mul_pr(eps_S0, sig6_S0);
c6_S2 = gmx_mul_pr(eps_S2, sig6_S2);
c12_S0 = gmx_mul_pr(c6_S0, sig6_S0);
c12_S2 = gmx_mul_pr(c6_S2, sig6_S2);
#endif /* LJ_COMB_LB */
/* Extract the group pair index per j pair.
 * Energy groups are stored per i-cluster, so things get
 * complicated when the i- and j-cluster sizes don't match.
 */
egps_j = nbat->energrp[cj>>1];
egp_jj[0] = ((egps_j >> ((cj & 1)*egps_jshift)) & egps_jmask)*egps_jstride;
/* We assume UNROLLI <= UNROLLJ */
for (jdi = 0; jdi < UNROLLJ/UNROLLI; jdi++)
egps_j = nbat->energrp[cj*(UNROLLJ/UNROLLI)+jdi];
for (jj = 0; jj < (UNROLLI/2); jj++)
egp_jj[jdi*(UNROLLI/2)+jj] = ((egps_j >> (jj*egps_jshift)) & egps_jmask)*egps_jstride;
#ifndef ENERGY_GROUPS
vctot_S = gmx_add_pr(vctot_S, gmx_add_pr(vcoul_S0, vcoul_S2));
add_ener_grp_halves(vcoul_S0, vctp[0], vctp[1], egp_jj);
add_ener_grp_halves(vcoul_S2, vctp[2], vctp[3], egp_jj);
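/* With energy groups, each full-width Coulomb energy register holds two
 * i-atoms (one per register half), so add_ener_grp_halves accumulates the
 * two halves into separate per-group energy pointers: vctp[0..1] for
 * i-atoms 0 and 1, vctp[2..3] for i-atoms 2 and 3, indexed per j pair
 * through egp_jj.
 */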
/* Calculate the LJ energies */
VLJ6_S0 = gmx_mul_pr(sixth_S, gmx_sub_pr(FrLJ6_S0, gmx_mul_pr(c6_S0, sh_invrc6_S)));
VLJ6_S2 = gmx_mul_pr(sixth_S, gmx_sub_pr(FrLJ6_S2, gmx_mul_pr(c6_S2, sh_invrc6_S)));
VLJ12_S0 = gmx_mul_pr(twelveth_S, gmx_sub_pr(FrLJ12_S0, gmx_mul_pr(c12_S0, sh_invrc12_S)));
VLJ12_S2 = gmx_mul_pr(twelveth_S, gmx_sub_pr(FrLJ12_S2, gmx_mul_pr(c12_S2, sh_invrc12_S)));
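/* sh_invrc6 and sh_invrc12 presumably hold 1/rc^6 and 1/rc^12, so
 * subtracting c6*sh_invrc6 and c12*sh_invrc12 shifts each LJ term to zero
 * at the cut-off; the shift is masked out again below for pairs that should
 * not carry it.
 */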
VLJ_S0 = gmx_sub_pr(VLJ12_S0, VLJ6_S0);
VLJ_S2 = gmx_sub_pr(VLJ12_S2, VLJ6_S2);
/* The potential shift should be removed for pairs beyond the cut-off */
VLJ_S0 = gmx_blendzero_pr(VLJ_S0, wco_vdw_S0);
VLJ_S2 = gmx_blendzero_pr(VLJ_S2, wco_vdw_S2);
/* The potential shift should be removed for excluded pairs */
VLJ_S0 = gmx_blendzero_pr(VLJ_S0, int_S0);
VLJ_S2 = gmx_blendzero_pr(VLJ_S2, int_S2);
#ifndef ENERGY_GROUPS
Vvdwtot_S = gmx_add_pr(Vvdwtot_S, gmx_add_pr(VLJ_S0, VLJ_S2));
add_ener_grp_halves(VLJ_S0, vvdwtp[0], vvdwtp[1], egp_jj);
add_ener_grp_halves(VLJ_S2, vvdwtp[2], vvdwtp[3], egp_jj);
#endif /* CALC_ENERGIES */
fscal_S0 = gmx_mul_pr(rinvsq_S0, gmx_add_pr(frcoul_S0, gmx_sub_pr(FrLJ12_S0, FrLJ6_S0)));
fscal_S0 = gmx_mul_pr(rinvsq_S0, frcoul_S0);
#if defined CALC_LJ && !defined HALF_LJ
fscal_S2 = gmx_mul_pr(rinvsq_S2, gmx_add_pr(frcoul_S2, gmx_sub_pr(FrLJ12_S2, FrLJ6_S2)));
/* Atoms 2 and 3 don't have LJ, so only add Coulomb forces */
fscal_S2 = gmx_mul_pr(rinvsq_S2, frcoul_S2);
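/* frcoul and FrLJ12 - FrLJ6 are force*r quantities, so multiplying by
 * rinvsq gives fscal = |F|/r; the products with dx, dy, dz below then yield
 * the Cartesian force components directly.
 */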
/* Calculate temporary vectorial force */
tx_S0 = gmx_mul_pr(fscal_S0, dx_S0);
tx_S2 = gmx_mul_pr(fscal_S2, dx_S2);
ty_S0 = gmx_mul_pr(fscal_S0, dy_S0);
ty_S2 = gmx_mul_pr(fscal_S2, dy_S2);
tz_S0 = gmx_mul_pr(fscal_S0, dz_S0);
tz_S2 = gmx_mul_pr(fscal_S2, dz_S2);
/* Increment i atom force */
fix_S0 = gmx_add_pr(fix_S0, tx_S0);
fix_S2 = gmx_add_pr(fix_S2, tx_S2);
fiy_S0 = gmx_add_pr(fiy_S0, ty_S0);
fiy_S2 = gmx_add_pr(fiy_S2, ty_S2);
fiz_S0 = gmx_add_pr(fiz_S0, tz_S0);
fiz_S2 = gmx_add_pr(fiz_S2, tz_S2);
/* Decrement j atom force */
gmx_load_hpr(fjx_S, f+ajx);
gmx_load_hpr(fjy_S, f+ajy);
gmx_load_hpr(fjz_S, f+ajz);
gmx_store_hpr(f+ajx, gmx_sub_hpr(fjx_S, gmx_sum4_hpr(tx_S0, tx_S2)));
gmx_store_hpr(f+ajy, gmx_sub_hpr(fjy_S, gmx_sum4_hpr(ty_S0, ty_S2)));
gmx_store_hpr(f+ajz, gmx_sub_hpr(fjz_S, gmx_sum4_hpr(tz_S0, tz_S2)));
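/* gmx_sum4_hpr reduces the two full-width registers (the contributions of
 * all four i-atoms, two per register half) to a single half-width vector
 * per dimension, which is then subtracted from the j-atom forces: Newton's
 * third law applied once per j atom.
 */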