/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/* Half-width SIMD operations are required here.
 * As the 4xn kernels are the "standard" kernels and some special operations
 * are required only here, we define those in nbnxn_kernel_simd_utils_...
 *
 * Half-width SIMD real type:
 * gmx_mm_hpr
 *
 * Half-width SIMD operations
 * Load reals at half-width aligned pointer b into half-width SIMD register a:
 * gmx_load_hpr(a, b)
 * Set all entries in half-width SIMD register *a to b:
 * gmx_set1_hpr(a, b)
 * Load one real at b and one real at b+1 into halves of a, respectively:
 * gmx_load1p1_pr(a, b)
 * Load reals at half-width aligned pointer b into two halves of a:
 * gmx_loaddh_pr(a, b)
 * Store half-width SIMD register b into half-width aligned memory a:
 * gmx_store_hpr(a, b)
 * Sum over 4 half SIMD registers:
 * gmx_sum4_hpr(a, b)
 * Sum the elements of halves of each input register and store sums in out:
 * gmx_mm_transpose_sum4h_pr(a, b)
 * Extract two half-width registers *b, *c from a full-width register a:
 * gmx_pr_to_2hpr(a, b, c)
 */
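
/* Illustrative scalar sketch (an assumption for clarity, not kernel code):
 * with a full SIMD width of 2*UNROLLJ reals, gmx_load1p1_pr(&a, b) behaves
 * roughly like
 *
 *     for (k = 0; k < UNROLLJ; k++)
 *     {
 *         a[k]         = b[0];
 *         a[UNROLLJ+k] = b[1];
 *     }
 *
 * and gmx_loaddh_pr(&a, b) duplicates UNROLLJ consecutive reals into both
 * halves: a[k] = a[UNROLLJ+k] = b[k]. This is what lets a single full-width
 * register process two i-atoms against one j-cluster at a time.
 */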

    const nbnxn_ci_t   *nbln;
    const nbnxn_cj_t   *l_cj;
    const real         *nbfp0, *nbfp1, *nbfp2 = NULL, *nbfp3 = NULL;
    gmx_bool            do_LJ, half_LJ, do_coul;
    int                 sci, scix, sciy, sciz, sci2;
    int                 cjind0, cjind1, cjind;
    int                 egps_ishift, egps_imask;
    int                 egps_jshift, egps_jmask, egps_jstride;
    real               *vvdwtp[UNROLLI];

    gmx_mm_pr           ix_S0, iy_S0, iz_S0;
    gmx_mm_pr           ix_S2, iy_S2, iz_S2;
    gmx_mm_pr           fix_S0, fiy_S0, fiz_S0;
    gmx_mm_pr           fix_S2, fiy_S2, fiz_S2;
    /* We use an i-force SIMD register width of 4 */
    /* The pr4 stuff is defined in nbnxn_kernel_simd_utils.h */
    gmx_mm_pr4          fix_S, fiy_S, fiz_S;

    gmx_mm_pr           diagonal_jmi_S;
#if UNROLLI == UNROLLJ
    gmx_mm_pb           diagonal_mask_S0, diagonal_mask_S2;
#else
    gmx_mm_pb           diagonal_mask0_S0, diagonal_mask0_S2;
    gmx_mm_pb           diagonal_mask1_S0, diagonal_mask1_S2;
#endif

    unsigned           *exclusion_filter;
    gmx_exclfilter      filter_S0, filter_S2;

    gmx_mm_pr           zero_S = gmx_set1_pr(0);
    gmx_mm_pr           one_S  = gmx_set1_pr(1.0);
    gmx_mm_pr           iq_S0  = gmx_setzero_pr();
    gmx_mm_pr           iq_S2  = gmx_setzero_pr();
    gmx_mm_pr           hrc_3_S, moh_rc_S;

    /* Coulomb table variables */
    const real         *tab_coul_F;
    const real         *tab_coul_V;
    int                 ti0_array[2*GMX_SIMD_WIDTH_HERE], *ti0;
    int                 ti2_array[2*GMX_SIMD_WIDTH_HERE], *ti2;

#ifdef CALC_COUL_EWALD
    gmx_mm_pr           beta2_S, beta_S;
#endif

#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
    gmx_mm_pr           sh_ewald_S;
#endif

    gmx_mm_pr           hsig_i_S0, seps_i_S0;
    gmx_mm_pr           hsig_i_S2, seps_i_S2;

    real                pvdw_array[2*UNROLLI*UNROLLJ+GMX_SIMD_WIDTH_HERE];
    real               *pvdw_c6, *pvdw_c12;
    gmx_mm_pr           c6_S0, c12_S0;
    gmx_mm_pr           c6_S2, c12_S2;

    gmx_mm_pr           c6s_S0, c12s_S0;
    gmx_mm_pr           c6s_S1, c12s_S1;
    gmx_mm_pr           c6s_S2 = gmx_setzero_pr(), c12s_S2 = gmx_setzero_pr();
    gmx_mm_pr           c6s_S3 = gmx_setzero_pr(), c12s_S3 = gmx_setzero_pr();
#endif /* LJ_COMB_LB */

    gmx_mm_pr           vctot_S, Vvdwtot_S;
    gmx_mm_pr           sixth_S, twelveth_S;

    gmx_mm_pr           avoid_sing_S;
#ifdef VDW_CUTOFF_CHECK
    gmx_mm_pr           rcvdw2_S;
#endif

    gmx_mm_pr           sh_invrc6_S, sh_invrc12_S;

    /* cppcheck-suppress unassignedVariable */
    real                tmpsum_array[2*GMX_SIMD_WIDTH_HERE], *tmpsum;
#ifdef CALC_SHIFTFORCES
    /* cppcheck-suppress unassignedVariable */
    real                shf_array[2*GMX_SIMD_WIDTH_HERE], *shf;
#endif

#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
    ljc = nbat->lj_comb;
#else
    /* No combination rule used */
    nbfp_ptr = (4 == nbfp_stride) ? nbat->nbfp_s4 : nbat->nbfp;
#endif

    /* Load j-i for the first i */
    diagonal_jmi_S    = gmx_load_pr(nbat->simd_2xnn_diagonal_j_minus_i);
    /* Generate all the diagonal masks as comparison results */
#if UNROLLI == UNROLLJ
    diagonal_mask_S0  = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask_S2  = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
#else
#if 2*UNROLLI == UNROLLJ
    diagonal_mask0_S0 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask0_S2 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask1_S0 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask1_S2 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
#endif
#endif
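
    /* How the masks above work (a conceptual note, not executed code):
     * simd_2xnn_diagonal_j_minus_i stores j - i for the lanes of the first
     * i-atom pair, so the comparison 0 < j - i keeps only j > i interactions
     * when a cluster is paired with itself, avoiding self- and double-counted
     * pairs. Subtracting 1 twice advances i by 2, turning the i0/i1 mask
     * into the i2/i3 mask.
     */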

    /* Load masks for topology exclusion masking. filter_stride is
       static const, so the conditional will be optimized away. */
    if (1 == filter_stride)
    {
        exclusion_filter = nbat->simd_exclusion_filter1;
    }
    else /* (2 == filter_stride) */
    {
        exclusion_filter = nbat->simd_exclusion_filter2;
    }

    /* Here we cast the exclusion masks from unsigned * to int * or real *.
     * Since we only check bits, the actual value they represent does not
     * matter, as long as both mask and exclusion info are treated the
     * same way.
     */
    filter_S0 = gmx_load_exclusion_filter(exclusion_filter + 0*2*UNROLLJ*filter_stride);
    filter_S2 = gmx_load_exclusion_filter(exclusion_filter + 1*2*UNROLLJ*filter_stride);
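
    /* Sketch of how the filters are applied (assumed scalar equivalent, with
     * bit(i, j) standing for the pair's bit position, whose exact layout is
     * defined by the pair-list format):
     *
     *     interact = (excl >> bit(i, j)) & 1;
     *
     * i.e. an interaction survives only if its bit is set. The SIMD inner
     * loop performs the same test as a bitwise AND with filter_S0/filter_S2
     * followed by a compare against zero.
     */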

#ifdef CALC_COUL_TAB
    /* Generate aligned table index pointers */
    ti0 = prepare_table_load_buffer(ti0_array);
    ti2 = prepare_table_load_buffer(ti2_array);

    invtsp_S  = gmx_set1_pr(ic->tabq_scale);
#ifdef CALC_ENERGIES
    mhalfsp_S = gmx_set1_pr(-0.5/ic->tabq_scale);
#endif

#ifdef TAB_FDV0
    tab_coul_F = ic->tabq_coul_FDV0;
#else
    tab_coul_F = ic->tabq_coul_F;
    tab_coul_V = ic->tabq_coul_V;
#endif
#endif /* CALC_COUL_TAB */
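
/* Table layout note (hedged; the kernel utility headers are authoritative):
 * with TAB_FDV0 the table packs aligned quadruplets F[i], F[i+1]-F[i],
 * V[i], 0, so a single SIMD load fetches everything needed for the linear
 * interpolation F(r) ~ F[i] + frac*(F[i+1]-F[i]); without it, force and
 * energy come from the separate F and V tables.
 */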

#ifdef CALC_COUL_EWALD
    beta2_S = gmx_set1_pr(ic->ewaldcoeff_q*ic->ewaldcoeff_q);
    beta_S  = gmx_set1_pr(ic->ewaldcoeff_q);
#endif

#if (defined CALC_COUL_TAB || defined CALC_COUL_EWALD) && defined CALC_ENERGIES
    sh_ewald_S = gmx_set1_pr(ic->sh_ewald);
#endif

    shiftvec = shift_vec[0];

    avoid_sing_S = gmx_set1_pr(NBNXN_AVOID_SING_R2_INC);

    /* The kernel either supports rcoulomb = rvdw or rcoulomb >= rvdw */
    rc2_S = gmx_set1_pr(ic->rcoulomb*ic->rcoulomb);
#ifdef VDW_CUTOFF_CHECK
    rcvdw2_S = gmx_set1_pr(ic->rvdw*ic->rvdw);
#endif

    sixth_S      = gmx_set1_pr(1.0/6.0);
    twelveth_S   = gmx_set1_pr(1.0/12.0);

    sh_invrc6_S  = gmx_set1_pr(ic->sh_invrc6);
    sh_invrc12_S = gmx_set1_pr(ic->sh_invrc6*ic->sh_invrc6);

    mrc_3_S  = gmx_set1_pr(-2*ic->k_rf);

    hrc_3_S  = gmx_set1_pr(ic->k_rf);
    moh_rc_S = gmx_set1_pr(-ic->c_rf);
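
    /* For reference (standard reaction-field expressions, not extra kernel
     * code): with V(r) = qq*(1/r + k_rf*r^2 - c_rf), the scalar force obeys
     * F(r)/r = qq*(1/r^3 - 2*k_rf). Thus mrc_3_S (-2*k_rf) enters the force,
     * while hrc_3_S (k_rf) and moh_rc_S (-c_rf) enter only the energy.
     */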

    tmpsum = gmx_simd_align_real(tmpsum_array);
#ifdef CALC_SHIFTFORCES
    shf    = gmx_simd_align_real(shf_array);
#endif

#ifdef FIX_LJ_C
    pvdw_c6  = gmx_simd_align_real(pvdw_array);
    pvdw_c12 = pvdw_c6 + UNROLLI*UNROLLJ;

    for (jp = 0; jp < UNROLLJ; jp++)
    {
        pvdw_c6 [0*UNROLLJ+jp] = nbat->nbfp[0*2];
        pvdw_c6 [1*UNROLLJ+jp] = nbat->nbfp[0*2];
        pvdw_c6 [2*UNROLLJ+jp] = nbat->nbfp[0*2];
        pvdw_c6 [3*UNROLLJ+jp] = nbat->nbfp[0*2];

        pvdw_c12[0*UNROLLJ+jp] = nbat->nbfp[0*2+1];
        pvdw_c12[1*UNROLLJ+jp] = nbat->nbfp[0*2+1];
        pvdw_c12[2*UNROLLJ+jp] = nbat->nbfp[0*2+1];
        pvdw_c12[3*UNROLLJ+jp] = nbat->nbfp[0*2+1];
    }

    c6_S0  = gmx_load_pr(pvdw_c6 +0*UNROLLJ);
    c6_S1  = gmx_load_pr(pvdw_c6 +1*UNROLLJ);
    c6_S2  = gmx_load_pr(pvdw_c6 +2*UNROLLJ);
    c6_S3  = gmx_load_pr(pvdw_c6 +3*UNROLLJ);

    c12_S0 = gmx_load_pr(pvdw_c12+0*UNROLLJ);
    c12_S1 = gmx_load_pr(pvdw_c12+1*UNROLLJ);
    c12_S2 = gmx_load_pr(pvdw_c12+2*UNROLLJ);
    c12_S3 = gmx_load_pr(pvdw_c12+3*UNROLLJ);
#endif /* FIX_LJ_C */

    egps_ishift  = nbat->neg_2log;
    egps_imask   = (1<<egps_ishift) - 1;
    egps_jshift  = 2*nbat->neg_2log;
    egps_jmask   = (1<<egps_jshift) - 1;
    egps_jstride = (UNROLLJ>>1)*UNROLLJ;
    /* Major division is over i-particle energy groups, determine the stride */
    Vstride_i    = nbat->nenergrp*(1<<nbat->neg_2log)*egps_jstride;

    for (n = 0; n < nbl->nci; n++)
    {
        nbln = &nbl->ci[n];

        ish    = (nbln->shift & NBNXN_CI_SHIFT);
        ish3   = ish*3;
        cjind0 = nbln->cj_ind_start;
        cjind1 = nbln->cj_ind_end;
        ci     = nbln->ci;
        ci_sh  = (ish == CENTRAL ? ci : -1);

        shX_S = gmx_load1_pr(shiftvec+ish3);
        shY_S = gmx_load1_pr(shiftvec+ish3+1);
        shZ_S = gmx_load1_pr(shiftvec+ish3+2);

        sci  = (ci>>1)*STRIDE;
        scix = sci*DIM + (ci & 1)*(STRIDE>>1);
        sci2 = sci*2 + (ci & 1)*(STRIDE>>1);
        sci += (ci & 1)*(STRIDE>>1);
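
        /* Worked example of the addressing above (assuming STRIDE == 8 and
         * 4-atom i-clusters, so two clusters share one stride-wide row):
         * for ci = 5, sci = (5>>1)*8 + (5&1)*4 = 20, i.e. the odd cluster
         * occupies the upper half of the row started by cluster 4.
         */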

        /* We have 5 LJ/C combinations, but use only three inner loops,
         * as the other combinations are unlikely and/or not much faster:
         * inner half-LJ + C for half-LJ + C / no-LJ + C
         * inner LJ + C      for full-LJ + C
         * inner LJ          for full-LJ + no-C / half-LJ + no-C
         */
        do_LJ   = (nbln->shift & NBNXN_CI_DO_LJ(0));
        do_coul = (nbln->shift & NBNXN_CI_DO_COUL(0));
        half_LJ = ((nbln->shift & NBNXN_CI_HALF_LJ(0)) || !do_LJ) && do_coul;

        egps_i = nbat->energrp[ci];

        for (ia = 0; ia < UNROLLI; ia++)
        {
            egp_ia     = (egps_i >> (ia*egps_ishift)) & egps_imask;
            vvdwtp[ia] = Vvdw + egp_ia*Vstride_i;
            vctp[ia]   = Vc   + egp_ia*Vstride_i;
        }

#if defined CALC_ENERGIES
#if UNROLLJ == 4
        if (do_coul && l_cj[nbln->cj_ind_start].cj == ci_sh)
#endif
#if UNROLLJ == 8
        if (do_coul && l_cj[nbln->cj_ind_start].cj == (ci_sh>>1))
#endif
        {
#ifdef CALC_COUL_RF
            Vc_sub_self = 0.5*ic->c_rf;
#endif
#ifdef CALC_COUL_TAB
#ifdef TAB_FDV0
            Vc_sub_self = 0.5*tab_coul_F[2];
#else
            Vc_sub_self = 0.5*tab_coul_V[0];
#endif
#endif
#ifdef CALC_COUL_EWALD
            /* beta/sqrt(pi) */
            Vc_sub_self = 0.5*ic->ewaldcoeff_q*M_2_SQRTPI;
#endif

            for (ia = 0; ia < UNROLLI; ia++)
            {
                qi = q[sci+ia];
                vctp[ia][((egps_i>>(ia*egps_ishift)) & egps_imask)*egps_jstride]
                    -= facel*qi*qi*Vc_sub_self;
            }
        }
#endif
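
        /* The correction above removes the spurious Coulomb energy an i atom
         * would pick up from itself when the diagonal cluster pair is
         * processed: each i atom loses facel*qi*qi*Vc_sub_self, where
         * Vc_sub_self is half the r -> 0 limit of the non-singular part of
         * the potential (c_rf for RF, the table value at zero for tabulated
         * interactions, 2*beta/sqrt(pi) for Ewald).
         */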

        /* Load i atom data */
        sciy = scix + STRIDE;
        sciz = sciy + STRIDE;
        gmx_load1p1_pr(&ix_S0, x+scix);
        gmx_load1p1_pr(&ix_S2, x+scix+2);
        gmx_load1p1_pr(&iy_S0, x+sciy);
        gmx_load1p1_pr(&iy_S2, x+sciy+2);
        gmx_load1p1_pr(&iz_S0, x+sciz);
        gmx_load1p1_pr(&iz_S2, x+sciz+2);
        ix_S0 = gmx_add_pr(ix_S0, shX_S);
        ix_S2 = gmx_add_pr(ix_S2, shX_S);
        iy_S0 = gmx_add_pr(iy_S0, shY_S);
        iy_S2 = gmx_add_pr(iy_S2, shY_S);
        iz_S0 = gmx_add_pr(iz_S0, shZ_S);
        iz_S2 = gmx_add_pr(iz_S2, shZ_S);
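
        /* After these adds each ..._S0 register holds i-atom 0 in its low
         * half and i-atom 1 in its high half (likewise ..._S2 for atoms 2
         * and 3), already shifted into the central periodic image.
         */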

        facel_S = gmx_set1_pr(facel);

        gmx_load1p1_pr(&iq_S0, q+sci);
        gmx_load1p1_pr(&iq_S2, q+sci+2);
        iq_S0 = gmx_mul_pr(facel_S, iq_S0);
        iq_S2 = gmx_mul_pr(facel_S, iq_S2);

#ifdef LJ_COMB_LB
        gmx_load1p1_pr(&hsig_i_S0, ljc+sci2+0);
        gmx_load1p1_pr(&hsig_i_S2, ljc+sci2+2);
        gmx_load1p1_pr(&seps_i_S0, ljc+sci2+STRIDE+0);
        gmx_load1p1_pr(&seps_i_S2, ljc+sci2+STRIDE+2);
#else
#ifdef LJ_COMB_GEOM
        gmx_load1p1_pr(&c6s_S0, ljc+sci2+0);
        gmx_load1p1_pr(&c6s_S2, ljc+sci2+2);
        gmx_load1p1_pr(&c12s_S0, ljc+sci2+STRIDE+0);
        gmx_load1p1_pr(&c12s_S2, ljc+sci2+STRIDE+2);
#else
        nbfp0 = nbfp_ptr + type[sci  ]*nbat->ntype*nbfp_stride;
        nbfp1 = nbfp_ptr + type[sci+1]*nbat->ntype*nbfp_stride;
        nbfp2 = nbfp_ptr + type[sci+2]*nbat->ntype*nbfp_stride;
        nbfp3 = nbfp_ptr + type[sci+3]*nbat->ntype*nbfp_stride;
#endif
#endif /* LJ_COMB_LB */
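
        /* nbfp0..nbfp3 each point at one i-atom's row of the ntype x ntype
         * LJ parameter matrix; a row stores c6/c12 pairs at nbfp_stride, so
         * the inner loop can fetch j-type parameters with one indexed load
         * per i atom.
         */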

        /* Zero the potential energy for this list */
        Vvdwtot_S = gmx_setzero_pr();
        vctot_S   = gmx_setzero_pr();

        /* Clear i atom forces */
        fix_S0 = gmx_setzero_pr();
        fix_S2 = gmx_setzero_pr();
        fiy_S0 = gmx_setzero_pr();
        fiy_S2 = gmx_setzero_pr();
        fiz_S0 = gmx_setzero_pr();
        fiz_S2 = gmx_setzero_pr();

        /* Currently all kernels use (at least half) LJ */
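
        /* Each of the three loop pairs below (half-LJ + Coulomb, LJ + Coulomb
         * and LJ-only in the original dispatch) is generated twice from the
         * same inner-loop header: the while-loop processes j-clusters that
         * carry exclusion masks (excl != NBNXN_INTERACTION_MASK_ALL) with
         * exclusion checking enabled, and the for-loop runs the cheaper
         * variant on the remaining, fully interacting j-clusters.
         */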
        while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
        {
#include "nbnxn_kernel_simd_2xnn_inner.h"
            cjind++;
        }
        for (; (cjind < cjind1); cjind++)
        {
#include "nbnxn_kernel_simd_2xnn_inner.h"
        }

        while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
        {
#include "nbnxn_kernel_simd_2xnn_inner.h"
            cjind++;
        }
        for (; (cjind < cjind1); cjind++)
        {
#include "nbnxn_kernel_simd_2xnn_inner.h"
        }

        while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
        {
#include "nbnxn_kernel_simd_2xnn_inner.h"
            cjind++;
        }
        for (; (cjind < cjind1); cjind++)
        {
#include "nbnxn_kernel_simd_2xnn_inner.h"
        }

        ninner += cjind1 - cjind0;

        /* Add accumulated i-forces to the force array */
        fix_S = gmx_mm_transpose_sum4h_pr(fix_S0, fix_S2);
        gmx_store_pr4(f+scix, gmx_add_pr4(fix_S, gmx_load_pr4(f+scix)));

        fiy_S = gmx_mm_transpose_sum4h_pr(fiy_S0, fiy_S2);
        gmx_store_pr4(f+sciy, gmx_add_pr4(fiy_S, gmx_load_pr4(f+sciy)));

        fiz_S = gmx_mm_transpose_sum4h_pr(fiz_S0, fiz_S2);
        gmx_store_pr4(f+sciz, gmx_add_pr4(fiz_S, gmx_load_pr4(f+sciz)));
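
        /* Conceptually: fix_S0 holds i0 forces in its low half and i1 forces
         * in its high half, fix_S2 likewise i2/i3; gmx_mm_transpose_sum4h_pr
         * reduces each half to a single sum, so fix_S becomes the 4-vector
         * { f_i0, f_i1, f_i2, f_i3 }, accumulated into f[] with a width-4
         * load/add/store.
         */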

#ifdef CALC_SHIFTFORCES
        fshift[ish3+0] += gmx_sum_simd4(fix_S, shf);
        fshift[ish3+1] += gmx_sum_simd4(fiy_S, shf);
        fshift[ish3+2] += gmx_sum_simd4(fiz_S, shf);
#endif

        *Vc   += gmx_sum_simd(vctot_S, tmpsum);
        *Vvdw += gmx_sum_simd(Vvdwtot_S, tmpsum);

        /* Outer loop uses 6 flops/iteration */
    }

#ifdef COUNT_PAIRS
    printf("atom pairs %d\n", npair);
#endif