/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2009, The GROMACS Development Team
 * Copyright (c) 2012, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/* This is the innermost loop contents for the n vs n atom
 * SSE2 single precision kernels.
 */

/* When calculating RF or Ewald interactions we calculate the electrostatic
 * forces on excluded atom pairs here in the non-bonded loops.
 * But when energies and/or the virial are required we calculate them
 * separately, as it is then easier to separate the energy and virial
 * contributions.
 */
#if defined CHECK_EXCLS && defined CALC_COULOMB
#define EXCL_FORCES
#endif

/* Without exclusions and energies we only need to mask the cut-off,
 * this can be faster with blendv (only available with SSE4.1 and later).
 */
#if !(defined CHECK_EXCLS || defined CALC_ENERGIES) && defined GMX_X86_SSE4_1 && !defined COUNT_PAIRS
/* With RF and tabulated Coulomb we replace cmp+and with sub+blendv.
 * With gcc this is slower, except for RF on Sandy Bridge.
 * Tested with gcc 4.6.2, 4.6.3 and 4.7.1.
 */
#if (defined CALC_COUL_RF || defined CALC_COUL_TAB) && (!defined __GNUC__ || (defined CALC_COUL_RF && defined GMX_X86_AVX_256))
#define CUTOFF_BLENDV
#endif
/* With analytical Ewald we replace cmp+and+and with sub+blendv+blendv.
 * This is only faster with icc on Sandy Bridge (PS kernel slower than gcc 4.7).
 */
#if defined CALC_COUL_EWALD && defined __INTEL_COMPILER && defined GMX_X86_AVX_256
#define CUTOFF_BLENDV
#endif
#endif
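
/* A scalar sketch of the blendv trick used below (illustrative only):
 * masking computes  val = (rsq < rc2) ? val : 0  with a compare and an
 * and, while blendv selects on the sign bit of its third operand, so
 *   blendv(val, 0, rc2 - rsq)
 * keeps val inside the cut-off (rc2 - rsq positive) and returns 0
 * outside, saving an instruction when no other masking is needed.
 */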

{
    int        cj,aj,ajx,ajy,ajz;

#ifdef ENERGY_GROUPS
    /* Energy group indices for two atoms packed into one int */
    int        egp_jj[UNROLLJ/2];
    int        egps_j;
#endif

#ifdef CHECK_EXCLS
    /* Interaction (non-exclusion) mask of all 1's or 0's */
    gmx_mm_pr  int_SSE0;
    gmx_mm_pr  int_SSE1;
    gmx_mm_pr  int_SSE2;
    gmx_mm_pr  int_SSE3;
#endif

    gmx_mm_pr  jxSSE,jySSE,jzSSE;
    gmx_mm_pr  dx_SSE0,dy_SSE0,dz_SSE0;
    gmx_mm_pr  dx_SSE1,dy_SSE1,dz_SSE1;
    gmx_mm_pr  dx_SSE2,dy_SSE2,dz_SSE2;
    gmx_mm_pr  dx_SSE3,dy_SSE3,dz_SSE3;
    gmx_mm_pr  tx_SSE0,ty_SSE0,tz_SSE0;
    gmx_mm_pr  tx_SSE1,ty_SSE1,tz_SSE1;
    gmx_mm_pr  tx_SSE2,ty_SSE2,tz_SSE2;
    gmx_mm_pr  tx_SSE3,ty_SSE3,tz_SSE3;
    gmx_mm_pr  rsq_SSE0,rinv_SSE0,rinvsq_SSE0;
    gmx_mm_pr  rsq_SSE1,rinv_SSE1,rinvsq_SSE1;
    gmx_mm_pr  rsq_SSE2,rinv_SSE2,rinvsq_SSE2;
    gmx_mm_pr  rsq_SSE3,rinv_SSE3,rinvsq_SSE3;
#ifndef CUTOFF_BLENDV
    /* wco: within cut-off, mask of all 1's or 0's */
    gmx_mm_pr  wco_SSE0;
    gmx_mm_pr  wco_SSE1;
    gmx_mm_pr  wco_SSE2;
    gmx_mm_pr  wco_SSE3;
#endif
#ifdef VDW_CUTOFF_CHECK
    gmx_mm_pr  wco_vdw_SSE0;
    gmx_mm_pr  wco_vdw_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr  wco_vdw_SSE2;
    gmx_mm_pr  wco_vdw_SSE3;
#endif
#endif

#ifdef EXCL_FORCES
    /* 1/r masked with the interaction mask */
    gmx_mm_pr  rinv_ex_SSE0;
    gmx_mm_pr  rinv_ex_SSE1;
    gmx_mm_pr  rinv_ex_SSE2;
    gmx_mm_pr  rinv_ex_SSE3;
#endif

#ifdef CALC_COUL_TAB
    /* The force (PME mesh force) we need to subtract from 1/r^2 */
    gmx_mm_pr  fsub_SSE0;
    gmx_mm_pr  fsub_SSE1;
    gmx_mm_pr  fsub_SSE2;
    gmx_mm_pr  fsub_SSE3;
#endif
#ifdef CALC_COUL_EWALD
    gmx_mm_pr  brsq_SSE0,brsq_SSE1,brsq_SSE2,brsq_SSE3;
    gmx_mm_pr  ewcorr_SSE0,ewcorr_SSE1,ewcorr_SSE2,ewcorr_SSE3;
#endif

    /* frcoul = (1/r - fsub)*r */
    gmx_mm_pr  frcoul_SSE0;
    gmx_mm_pr  frcoul_SSE1;
    gmx_mm_pr  frcoul_SSE2;
    gmx_mm_pr  frcoul_SSE3;

#ifdef CALC_COUL_TAB
    /* For tables: r, rs=r/sp, rf=floor(rs), frac=rs-rf */
    gmx_mm_pr  r_SSE0,rs_SSE0,rf_SSE0,frac_SSE0;
    gmx_mm_pr  r_SSE1,rs_SSE1,rf_SSE1,frac_SSE1;
    gmx_mm_pr  r_SSE2,rs_SSE2,rf_SSE2,frac_SSE2;
    gmx_mm_pr  r_SSE3,rs_SSE3,rf_SSE3,frac_SSE3;
    /* Table index: rs converted to an int */
#if !(defined GMX_MM256_HERE && defined GMX_DOUBLE)
    gmx_epi32  ti_SSE0,ti_SSE1,ti_SSE2,ti_SSE3;
#else
    __m128i    ti_SSE0,ti_SSE1,ti_SSE2,ti_SSE3;
#endif
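    /* Why two index types (an inference from the widths involved): the
     * table lookup needs one 32-bit index per SIMD lane.  With 256-bit
     * registers in double precision there are only 4 lanes, so the 4
     * indices fit in a 128-bit __m128i; in the other cases the generic
     * gmx_epi32 integer type matches the lane count.
     */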
    /* Linear force table values */
    gmx_mm_pr  ctab0_SSE0,ctab1_SSE0;
    gmx_mm_pr  ctab0_SSE1,ctab1_SSE1;
    gmx_mm_pr  ctab0_SSE2,ctab1_SSE2;
    gmx_mm_pr  ctab0_SSE3,ctab1_SSE3;
#ifdef CALC_ENERGIES
    /* Quadratic energy table value */
    gmx_mm_pr  ctabv_SSE0;
    gmx_mm_pr  ctabv_SSE1;
    gmx_mm_pr  ctabv_SSE2;
    gmx_mm_pr  ctabv_SSE3;
#endif
#endif

#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
    /* The potential (PME mesh) we need to subtract from 1/r */
    gmx_mm_pr  vc_sub_SSE0;
    gmx_mm_pr  vc_sub_SSE1;
    gmx_mm_pr  vc_sub_SSE2;
    gmx_mm_pr  vc_sub_SSE3;
#endif
#ifdef CALC_ENERGIES
    /* Electrostatic potential */
    gmx_mm_pr  vcoul_SSE0;
    gmx_mm_pr  vcoul_SSE1;
    gmx_mm_pr  vcoul_SSE2;
    gmx_mm_pr  vcoul_SSE3;
#endif

    /* The force times 1/r */
    gmx_mm_pr  fscal_SSE0;
    gmx_mm_pr  fscal_SSE1;
    gmx_mm_pr  fscal_SSE2;
    gmx_mm_pr  fscal_SSE3;

#ifdef CALC_LJ
#ifdef LJ_COMB_LB
    /* LJ sigma_j/2 and sqrt(epsilon_j) */
    gmx_mm_pr  hsig_j_SSE,seps_j_SSE;
    /* LJ sigma_ij and epsilon_ij */
    gmx_mm_pr  sig_SSE0,eps_SSE0;
    gmx_mm_pr  sig_SSE1,eps_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr  sig_SSE2,eps_SSE2;
    gmx_mm_pr  sig_SSE3,eps_SSE3;
#endif

    gmx_mm_pr  sig2_SSE0,sig6_SSE0;
    gmx_mm_pr  sig2_SSE1,sig6_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr  sig2_SSE2,sig6_SSE2;
    gmx_mm_pr  sig2_SSE3,sig6_SSE3;
#endif
#endif /* LJ_COMB_LB */

#ifdef LJ_COMB_GEOM
    gmx_mm_pr  c6s_j_SSE,c12s_j_SSE;
#endif

#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
    /* Index for loading LJ parameters, complicated when interleaving */
    int        aj2;
#endif

    /* LJ C6 and C12 parameters, used with geometric comb. rule */
    gmx_mm_pr  c6_SSE0,c12_SSE0;
    gmx_mm_pr  c6_SSE1,c12_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr  c6_SSE2,c12_SSE2;
    gmx_mm_pr  c6_SSE3,c12_SSE3;
#endif

    /* Intermediate variables for LJ calculation */
#ifndef LJ_COMB_LB
    gmx_mm_pr  rinvsix_SSE0;
    gmx_mm_pr  rinvsix_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr  rinvsix_SSE2;
    gmx_mm_pr  rinvsix_SSE3;
#endif
#endif
#ifdef LJ_COMB_LB
    gmx_mm_pr  sir_SSE0,sir2_SSE0,sir6_SSE0;
    gmx_mm_pr  sir_SSE1,sir2_SSE1,sir6_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr  sir_SSE2,sir2_SSE2,sir6_SSE2;
    gmx_mm_pr  sir_SSE3,sir2_SSE3,sir6_SSE3;
#endif
#endif

    gmx_mm_pr  FrLJ6_SSE0,FrLJ12_SSE0;
    gmx_mm_pr  FrLJ6_SSE1,FrLJ12_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr  FrLJ6_SSE2,FrLJ12_SSE2;
    gmx_mm_pr  FrLJ6_SSE3,FrLJ12_SSE3;
#endif
#ifdef CALC_ENERGIES
    gmx_mm_pr  VLJ6_SSE0,VLJ12_SSE0,VLJ_SSE0;
    gmx_mm_pr  VLJ6_SSE1,VLJ12_SSE1,VLJ_SSE1;
#ifndef HALF_LJ
    gmx_mm_pr  VLJ6_SSE2,VLJ12_SSE2,VLJ_SSE2;
    gmx_mm_pr  VLJ6_SSE3,VLJ12_SSE3,VLJ_SSE3;
#endif
#endif
#endif /* CALC_LJ */

    /* j-cluster index */
    cj            = l_cj[cjind].cj;

    /* Atom indices (of the first atom in the cluster) */
    aj            = cj*UNROLLJ;
#if defined CALC_LJ && (defined LJ_COMB_GEOM || defined LJ_COMB_LB)
#if UNROLLJ == STRIDE
    aj2           = aj*2;
#else
    aj2           = (cj>>1)*2*STRIDE + (cj & 1)*UNROLLJ;
#endif
#endif
#if UNROLLJ == STRIDE
    ajx           = aj*DIM;
#else
    ajx           = (cj>>1)*DIM*STRIDE + (cj & 1)*UNROLLJ;
#endif
    ajy           = ajx + STRIDE;
    ajz           = ajy + STRIDE;

#ifdef CHECK_EXCLS
#ifndef GMX_MM256_HERE
    {
        /* Load integer interaction mask */
        __m128i mask_int = _mm_set1_epi32(l_cj[cjind].excl);

        /* There is no unequal SSE instruction, so we need a not here */
        int_SSE0  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask0),zeroi_SSE));
        int_SSE1  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask1),zeroi_SSE));
        int_SSE2  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask2),zeroi_SSE));
        int_SSE3  = gmx_mm_castsi128_pr(_mm_cmpeq_epi32(_mm_andnot_si128(mask_int,mask3),zeroi_SSE));
    }
#else
#ifndef GMX_DOUBLE
    {
        /* Load integer interaction mask */
        /* With AVX there are no integer operations, so cast to real */
        gmx_mm_pr mask_pr = gmx_mm_castsi256_pr(_mm256_set1_epi32(l_cj[cjind].excl));
        /* We can't compare all 4*8=32 float bits: shift the mask */
        gmx_mm_pr masksh_pr = gmx_mm_castsi256_pr(_mm256_set1_epi32(l_cj[cjind].excl>>(2*UNROLLJ)));
        /* Intel Compiler version 12.1.3 20120130 is buggy: use cast.
         * With gcc we don't need the cast, but it's faster.
         */
#define cast_cvt(x)  _mm256_cvtepi32_ps(_mm256_castps_si256(x))
        int_SSE0  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr,mask0)),zero_SSE);
        int_SSE1  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(mask_pr,mask1)),zero_SSE);
        int_SSE2  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(masksh_pr,mask0)),zero_SSE);
        int_SSE3  = gmx_cmpneq_pr(cast_cvt(gmx_and_pr(masksh_pr,mask1)),zero_SSE);
#undef cast_cvt
    }
#else
    {
        /* Load integer interaction mask */
        /* With AVX there are no integer operations,
         * and there is no int to double conversion, so cast to float
         */
        __m256 mask_ps = _mm256_castsi256_ps(_mm256_set1_epi32(l_cj[cjind].excl));
#define cast_cvt(x)  _mm256_castps_pd(_mm256_cvtepi32_ps(_mm256_castps_si256(x)))
        int_SSE0  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask0)),zero_SSE);
        int_SSE1  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask1)),zero_SSE);
        int_SSE2  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask2)),zero_SSE);
        int_SSE3  = gmx_cmpneq_pr(cast_cvt(_mm256_and_ps(mask_ps,mask3)),zero_SSE);
#undef cast_cvt
    }
#endif
#endif
#endif /* CHECK_EXCLS */
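
/* Note on the cast_cvt construction (explanatory, inferred from the
 * code): AVX before AVX2 has no 256-bit integer compare, so each 32-bit
 * lane is converted to float with _mm256_cvtepi32_ps; any nonzero bit
 * pattern becomes a nonzero float and gmx_cmpneq_pr against zero then
 * yields the all-1s/all-0s interaction mask.  The exclusion word packs
 * UNROLLI*UNROLLJ bits, and mask0/mask1 test the rows of i-atoms 0 and
 * 1, so for i-atoms 2 and 3 the word is pre-shifted down by 2*UNROLLJ
 * and the same two mask constants are reused.
 */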

    /* load j atom coordinates */
    jxSSE         = gmx_load_pr(x+ajx);
    jySSE         = gmx_load_pr(x+ajy);
    jzSSE         = gmx_load_pr(x+ajz);

    /* Calculate distance */
    dx_SSE0       = gmx_sub_pr(ix_SSE0,jxSSE);
    dy_SSE0       = gmx_sub_pr(iy_SSE0,jySSE);
    dz_SSE0       = gmx_sub_pr(iz_SSE0,jzSSE);
    dx_SSE1       = gmx_sub_pr(ix_SSE1,jxSSE);
    dy_SSE1       = gmx_sub_pr(iy_SSE1,jySSE);
    dz_SSE1       = gmx_sub_pr(iz_SSE1,jzSSE);
    dx_SSE2       = gmx_sub_pr(ix_SSE2,jxSSE);
    dy_SSE2       = gmx_sub_pr(iy_SSE2,jySSE);
    dz_SSE2       = gmx_sub_pr(iz_SSE2,jzSSE);
    dx_SSE3       = gmx_sub_pr(ix_SSE3,jxSSE);
    dy_SSE3       = gmx_sub_pr(iy_SSE3,jySSE);
    dz_SSE3       = gmx_sub_pr(iz_SSE3,jzSSE);

    /* rsq = dx*dx+dy*dy+dz*dz */
    rsq_SSE0      = gmx_calc_rsq_pr(dx_SSE0,dy_SSE0,dz_SSE0);
    rsq_SSE1      = gmx_calc_rsq_pr(dx_SSE1,dy_SSE1,dz_SSE1);
    rsq_SSE2      = gmx_calc_rsq_pr(dx_SSE2,dy_SSE2,dz_SSE2);
    rsq_SSE3      = gmx_calc_rsq_pr(dx_SSE3,dy_SSE3,dz_SSE3);

#ifndef CUTOFF_BLENDV
    wco_SSE0      = gmx_cmplt_pr(rsq_SSE0,rc2_SSE);
    wco_SSE1      = gmx_cmplt_pr(rsq_SSE1,rc2_SSE);
    wco_SSE2      = gmx_cmplt_pr(rsq_SSE2,rc2_SSE);
    wco_SSE3      = gmx_cmplt_pr(rsq_SSE3,rc2_SSE);
#endif

#ifdef CHECK_EXCLS
#ifdef EXCL_FORCES
    /* Only remove the (sub-)diagonal to avoid double counting */
#if UNROLLJ == UNROLLI
    if (cj == ci_sh)
    {
        wco_SSE0  = gmx_and_pr(wco_SSE0,diag_SSE0);
        wco_SSE1  = gmx_and_pr(wco_SSE1,diag_SSE1);
        wco_SSE2  = gmx_and_pr(wco_SSE2,diag_SSE2);
        wco_SSE3  = gmx_and_pr(wco_SSE3,diag_SSE3);
    }
#else
#if UNROLLJ < UNROLLI
    if (cj == ci_sh*2)
    {
        wco_SSE0  = gmx_and_pr(wco_SSE0,diag0_SSE0);
        wco_SSE1  = gmx_and_pr(wco_SSE1,diag0_SSE1);
        wco_SSE2  = gmx_and_pr(wco_SSE2,diag0_SSE2);
        wco_SSE3  = gmx_and_pr(wco_SSE3,diag0_SSE3);
    }
    if (cj == ci_sh*2 + 1)
    {
        wco_SSE0  = gmx_and_pr(wco_SSE0,diag1_SSE0);
        wco_SSE1  = gmx_and_pr(wco_SSE1,diag1_SSE1);
        wco_SSE2  = gmx_and_pr(wco_SSE2,diag1_SSE2);
        wco_SSE3  = gmx_and_pr(wco_SSE3,diag1_SSE3);
    }
#else
    if (cj*2 == ci_sh)
    {
        wco_SSE0  = gmx_and_pr(wco_SSE0,diag0_SSE0);
        wco_SSE1  = gmx_and_pr(wco_SSE1,diag0_SSE1);
        wco_SSE2  = gmx_and_pr(wco_SSE2,diag0_SSE2);
        wco_SSE3  = gmx_and_pr(wco_SSE3,diag0_SSE3);
    }
    else if (cj*2 + 1 == ci_sh)
    {
        wco_SSE0  = gmx_and_pr(wco_SSE0,diag1_SSE0);
        wco_SSE1  = gmx_and_pr(wco_SSE1,diag1_SSE1);
        wco_SSE2  = gmx_and_pr(wco_SSE2,diag1_SSE2);
        wco_SSE3  = gmx_and_pr(wco_SSE3,diag1_SSE3);
    }
#endif
#endif
#else /* EXCL_FORCES */
    /* Remove all excluded atom pairs from the list */
    wco_SSE0      = gmx_and_pr(wco_SSE0,int_SSE0);
    wco_SSE1      = gmx_and_pr(wco_SSE1,int_SSE1);
    wco_SSE2      = gmx_and_pr(wco_SSE2,int_SSE2);
    wco_SSE3      = gmx_and_pr(wco_SSE3,int_SSE3);
#endif
#endif

#ifdef COUNT_PAIRS
    {
        /* npair is assumed to be declared by the including kernel file */
        int  i,j;
        real tmp[UNROLLJ];

        for(i=0; i<UNROLLI; i++)
        {
            gmx_storeu_pr(tmp,i==0 ? wco_SSE0 : (i==1 ? wco_SSE1 : (i==2 ? wco_SSE2 : wco_SSE3)));
            for(j=0; j<UNROLLJ; j++)
            {
                if (tmp[j] != 0)
                {
                    npair++;
                }
            }
        }
    }
#endif

#ifdef CHECK_EXCLS
    /* For excluded pairs add a small number to avoid r^-6 = NaN */
    rsq_SSE0      = gmx_add_pr(rsq_SSE0,gmx_andnot_pr(int_SSE0,avoid_sing_SSE));
    rsq_SSE1      = gmx_add_pr(rsq_SSE1,gmx_andnot_pr(int_SSE1,avoid_sing_SSE));
    rsq_SSE2      = gmx_add_pr(rsq_SSE2,gmx_andnot_pr(int_SSE2,avoid_sing_SSE));
    rsq_SSE3      = gmx_add_pr(rsq_SSE3,gmx_andnot_pr(int_SSE3,avoid_sing_SSE));
#endif
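
/* Explanatory note: gmx_andnot_pr(a,b) computes (~a) & b, so the small
 * constant is added only in lanes where the interaction mask is zero
 * (excluded pairs).  The spurious finite 1/r this produces never reaches
 * the output, because the force and energy terms are masked with the
 * same interaction mask further down.
 */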

    /* Calculate 1/r */
#ifndef GMX_DOUBLE
    rinv_SSE0     = gmx_invsqrt_pr(rsq_SSE0);
    rinv_SSE1     = gmx_invsqrt_pr(rsq_SSE1);
    rinv_SSE2     = gmx_invsqrt_pr(rsq_SSE2);
    rinv_SSE3     = gmx_invsqrt_pr(rsq_SSE3);
#else
    GMX_MM_INVSQRT2_PD(rsq_SSE0,rsq_SSE1,rinv_SSE0,rinv_SSE1);
    GMX_MM_INVSQRT2_PD(rsq_SSE2,rsq_SSE3,rinv_SSE2,rinv_SSE3);
#endif
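
/* Sketch of the usual approach (an assumption about these helpers, not
 * spelled out here): in single precision gmx_invsqrt_pr is typically the
 * hardware rsqrt estimate refined with one Newton-Raphson step,
 *   y' = 0.5*y*(3 - x*y*y),
 * while the double-precision macro refines two register pairs together
 * to hide the latency of the additional iterations required.
 */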

#ifdef CALC_COULOMB
    /* Load parameters for j atom */
    jq_SSE        = gmx_load_pr(q+aj);
    qq_SSE0       = gmx_mul_pr(iq_SSE0,jq_SSE);
    qq_SSE1       = gmx_mul_pr(iq_SSE1,jq_SSE);
    qq_SSE2       = gmx_mul_pr(iq_SSE2,jq_SSE);
    qq_SSE3       = gmx_mul_pr(iq_SSE3,jq_SSE);
#endif

#ifdef CALC_LJ
#if !defined LJ_COMB_GEOM && !defined LJ_COMB_LB && !defined FIX_LJ_C
    load_lj_pair_params(nbfp0,type,aj,c6_SSE0,c12_SSE0);
    load_lj_pair_params(nbfp1,type,aj,c6_SSE1,c12_SSE1);
#ifndef HALF_LJ
    load_lj_pair_params(nbfp2,type,aj,c6_SSE2,c12_SSE2);
    load_lj_pair_params(nbfp3,type,aj,c6_SSE3,c12_SSE3);
#endif
#endif /* not defined any LJ rule */

#ifdef LJ_COMB_GEOM
    c6s_j_SSE     = gmx_load_pr(ljc+aj2+0);
    c12s_j_SSE    = gmx_load_pr(ljc+aj2+STRIDE);
    c6_SSE0       = gmx_mul_pr(c6s_SSE0 ,c6s_j_SSE );
    c6_SSE1       = gmx_mul_pr(c6s_SSE1 ,c6s_j_SSE );
#ifndef HALF_LJ
    c6_SSE2       = gmx_mul_pr(c6s_SSE2 ,c6s_j_SSE );
    c6_SSE3       = gmx_mul_pr(c6s_SSE3 ,c6s_j_SSE );
#endif
    c12_SSE0      = gmx_mul_pr(c12s_SSE0,c12s_j_SSE);
    c12_SSE1      = gmx_mul_pr(c12s_SSE1,c12s_j_SSE);
#ifndef HALF_LJ
    c12_SSE2      = gmx_mul_pr(c12s_SSE2,c12s_j_SSE);
    c12_SSE3      = gmx_mul_pr(c12s_SSE3,c12s_j_SSE);
#endif
#endif /* LJ_COMB_GEOM */
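
/* The geometric combination rule gives C6ij = sqrt(C6i*C6j) and
 * C12ij = sqrt(C12i*C12j); the c6s/c12s values appear to hold the
 * per-atom square roots, so the pair parameter costs one multiply
 * instead of a square root per pair.
 */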

#ifdef LJ_COMB_LB
    hsig_j_SSE    = gmx_load_pr(ljc+aj2+0);
    seps_j_SSE    = gmx_load_pr(ljc+aj2+STRIDE);

    sig_SSE0      = gmx_add_pr(hsig_i_SSE0,hsig_j_SSE);
    sig_SSE1      = gmx_add_pr(hsig_i_SSE1,hsig_j_SSE);
    eps_SSE0      = gmx_mul_pr(seps_i_SSE0,seps_j_SSE);
    eps_SSE1      = gmx_mul_pr(seps_i_SSE1,seps_j_SSE);
#ifndef HALF_LJ
    sig_SSE2      = gmx_add_pr(hsig_i_SSE2,hsig_j_SSE);
    sig_SSE3      = gmx_add_pr(hsig_i_SSE3,hsig_j_SSE);
    eps_SSE2      = gmx_mul_pr(seps_i_SSE2,seps_j_SSE);
    eps_SSE3      = gmx_mul_pr(seps_i_SSE3,seps_j_SSE);
#endif
#endif /* LJ_COMB_LB */
#endif /* CALC_LJ */
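
/* Lorentz-Berthelot combination: sigma_ij = (sigma_i + sigma_j)/2 and
 * eps_ij = sqrt(eps_i*eps_j).  Since hsig holds sigma/2 and seps holds
 * sqrt(epsilon) (see the declarations above), the combination reduces
 * to one add and one multiply per pair.
 */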

#ifndef CUTOFF_BLENDV
    rinv_SSE0     = gmx_and_pr(rinv_SSE0,wco_SSE0);
    rinv_SSE1     = gmx_and_pr(rinv_SSE1,wco_SSE1);
    rinv_SSE2     = gmx_and_pr(rinv_SSE2,wco_SSE2);
    rinv_SSE3     = gmx_and_pr(rinv_SSE3,wco_SSE3);
#else
    /* We only need to mask for the cut-off: blendv is faster */
    rinv_SSE0     = gmx_blendv_pr(rinv_SSE0,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE0));
    rinv_SSE1     = gmx_blendv_pr(rinv_SSE1,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE1));
    rinv_SSE2     = gmx_blendv_pr(rinv_SSE2,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE2));
    rinv_SSE3     = gmx_blendv_pr(rinv_SSE3,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE3));
#endif

    rinvsq_SSE0   = gmx_mul_pr(rinv_SSE0,rinv_SSE0);
    rinvsq_SSE1   = gmx_mul_pr(rinv_SSE1,rinv_SSE1);
    rinvsq_SSE2   = gmx_mul_pr(rinv_SSE2,rinv_SSE2);
    rinvsq_SSE3   = gmx_mul_pr(rinv_SSE3,rinv_SSE3);

#ifdef CALC_COULOMB
    /* Note that here we calculate force*r, not the usual force/r.
     * This allows avoiding masking the reaction-field contribution,
     * as frcoul is later multiplied by rinvsq which has been
     * masked with the cut-off check.
     */

#ifdef EXCL_FORCES
    /* Only add 1/r for non-excluded atom pairs */
    rinv_ex_SSE0  = gmx_and_pr(rinv_SSE0,int_SSE0);
    rinv_ex_SSE1  = gmx_and_pr(rinv_SSE1,int_SSE1);
    rinv_ex_SSE2  = gmx_and_pr(rinv_SSE2,int_SSE2);
    rinv_ex_SSE3  = gmx_and_pr(rinv_SSE3,int_SSE3);
#else
    /* No exclusion forces, we always need 1/r */
#define rinv_ex_SSE0  rinv_SSE0
#define rinv_ex_SSE1  rinv_SSE1
#define rinv_ex_SSE2  rinv_SSE2
#define rinv_ex_SSE3  rinv_SSE3
#endif

#ifdef CALC_COUL_RF
    /* Electrostatic interactions */
    frcoul_SSE0   = gmx_mul_pr(qq_SSE0,gmx_add_pr(rinv_ex_SSE0,gmx_mul_pr(rsq_SSE0,mrc_3_SSE)));
    frcoul_SSE1   = gmx_mul_pr(qq_SSE1,gmx_add_pr(rinv_ex_SSE1,gmx_mul_pr(rsq_SSE1,mrc_3_SSE)));
    frcoul_SSE2   = gmx_mul_pr(qq_SSE2,gmx_add_pr(rinv_ex_SSE2,gmx_mul_pr(rsq_SSE2,mrc_3_SSE)));
    frcoul_SSE3   = gmx_mul_pr(qq_SSE3,gmx_add_pr(rinv_ex_SSE3,gmx_mul_pr(rsq_SSE3,mrc_3_SSE)));

#ifdef CALC_ENERGIES
    vcoul_SSE0    = gmx_mul_pr(qq_SSE0,gmx_add_pr(rinv_ex_SSE0,gmx_add_pr(gmx_mul_pr(rsq_SSE0,hrc_3_SSE),moh_rc_SSE)));
    vcoul_SSE1    = gmx_mul_pr(qq_SSE1,gmx_add_pr(rinv_ex_SSE1,gmx_add_pr(gmx_mul_pr(rsq_SSE1,hrc_3_SSE),moh_rc_SSE)));
    vcoul_SSE2    = gmx_mul_pr(qq_SSE2,gmx_add_pr(rinv_ex_SSE2,gmx_add_pr(gmx_mul_pr(rsq_SSE2,hrc_3_SSE),moh_rc_SSE)));
    vcoul_SSE3    = gmx_mul_pr(qq_SSE3,gmx_add_pr(rinv_ex_SSE3,gmx_add_pr(gmx_mul_pr(rsq_SSE3,hrc_3_SSE),moh_rc_SSE)));
#endif
#endif /* CALC_COUL_RF */
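
/* Reaction-field reference (derived from the expressions above; the
 * constant naming is an assumption): for a cut-off rc with
 * eps_rf = infinity,
 *   V(r)   = qq*(1/r + r^2/(2*rc^3) - 3/(2*rc))
 *   F(r)*r = qq*(1/r - r^2/rc^3)
 * so hrc_3_SSE ~ 1/(2*rc^3), mrc_3_SSE ~ -1/rc^3 and
 * moh_rc_SSE ~ -3/(2*rc), making both V and F*r vanish at r = rc.
 */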

#ifdef CALC_COUL_EWALD
    /* We need to mask (or limit) rsq for the cut-off,
     * as large distances can cause an overflow in gmx_pmecorrF/V.
     */
#ifndef CUTOFF_BLENDV
    brsq_SSE0     = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE0,wco_SSE0));
    brsq_SSE1     = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE1,wco_SSE1));
    brsq_SSE2     = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE2,wco_SSE2));
    brsq_SSE3     = gmx_mul_pr(beta2_SSE,gmx_and_pr(rsq_SSE3,wco_SSE3));
#else
    /* Strangely, putting the mul on a separate line is slower (icc 13) */
    brsq_SSE0     = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE0,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE0)));
    brsq_SSE1     = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE1,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE1)));
    brsq_SSE2     = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE2,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE2)));
    brsq_SSE3     = gmx_mul_pr(beta2_SSE,gmx_blendv_pr(rsq_SSE3,zero_SSE,gmx_sub_pr(rc2_SSE,rsq_SSE3)));
#endif
    ewcorr_SSE0   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE0),beta_SSE);
    ewcorr_SSE1   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE1),beta_SSE);
    ewcorr_SSE2   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE2),beta_SSE);
    ewcorr_SSE3   = gmx_mul_pr(gmx_pmecorrF_pr(brsq_SSE3),beta_SSE);
    frcoul_SSE0   = gmx_mul_pr(qq_SSE0,gmx_add_pr(rinv_ex_SSE0,gmx_mul_pr(ewcorr_SSE0,brsq_SSE0)));
    frcoul_SSE1   = gmx_mul_pr(qq_SSE1,gmx_add_pr(rinv_ex_SSE1,gmx_mul_pr(ewcorr_SSE1,brsq_SSE1)));
    frcoul_SSE2   = gmx_mul_pr(qq_SSE2,gmx_add_pr(rinv_ex_SSE2,gmx_mul_pr(ewcorr_SSE2,brsq_SSE2)));
    frcoul_SSE3   = gmx_mul_pr(qq_SSE3,gmx_add_pr(rinv_ex_SSE3,gmx_mul_pr(ewcorr_SSE3,brsq_SSE3)));

#ifdef CALC_ENERGIES
    vc_sub_SSE0   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE0),beta_SSE);
    vc_sub_SSE1   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE1),beta_SSE);
    vc_sub_SSE2   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE2),beta_SSE);
    vc_sub_SSE3   = gmx_mul_pr(gmx_pmecorrV_pr(brsq_SSE3),beta_SSE);
#endif
#endif /* CALC_COUL_EWALD */
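
/* Ewald/PME background (a summary, not stated in this file): the
 * short-range part of the Ewald sum replaces 1/r by erfc(beta*r)/r.
 * Rather than evaluating erfc directly, the kernel computes the full
 * 1/r term and removes the mesh contribution erf(beta*r)/r;
 * gmx_pmecorrF and gmx_pmecorrV are presumably polynomial
 * approximations of that correction in the variable beta^2*r^2,
 * scaled here by beta to restore the units.
 */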

#ifdef CALC_COUL_TAB
    /* Electrostatic interactions */
    r_SSE0        = gmx_mul_pr(rsq_SSE0,rinv_SSE0);
    r_SSE1        = gmx_mul_pr(rsq_SSE1,rinv_SSE1);
    r_SSE2        = gmx_mul_pr(rsq_SSE2,rinv_SSE2);
    r_SSE3        = gmx_mul_pr(rsq_SSE3,rinv_SSE3);
    /* Convert r to scaled table units */
    rs_SSE0       = gmx_mul_pr(r_SSE0,invtsp_SSE);
    rs_SSE1       = gmx_mul_pr(r_SSE1,invtsp_SSE);
    rs_SSE2       = gmx_mul_pr(r_SSE2,invtsp_SSE);
    rs_SSE3       = gmx_mul_pr(r_SSE3,invtsp_SSE);
    /* Truncate scaled r to an int */
    ti_SSE0       = gmx_cvttpr_epi32(rs_SSE0);
    ti_SSE1       = gmx_cvttpr_epi32(rs_SSE1);
    ti_SSE2       = gmx_cvttpr_epi32(rs_SSE2);
    ti_SSE3       = gmx_cvttpr_epi32(rs_SSE3);
#ifdef GMX_X86_SSE4_1
    /* SSE4.1 floor is faster than gmx_cvtepi32_ps int->float cast */
    rf_SSE0       = gmx_floor_pr(rs_SSE0);
    rf_SSE1       = gmx_floor_pr(rs_SSE1);
    rf_SSE2       = gmx_floor_pr(rs_SSE2);
    rf_SSE3       = gmx_floor_pr(rs_SSE3);
#else
    rf_SSE0       = gmx_cvtepi32_pr(ti_SSE0);
    rf_SSE1       = gmx_cvtepi32_pr(ti_SSE1);
    rf_SSE2       = gmx_cvtepi32_pr(ti_SSE2);
    rf_SSE3       = gmx_cvtepi32_pr(ti_SSE3);
#endif

    frac_SSE0     = gmx_sub_pr(rs_SSE0,rf_SSE0);
    frac_SSE1     = gmx_sub_pr(rs_SSE1,rf_SSE1);
    frac_SSE2     = gmx_sub_pr(rs_SSE2,rf_SSE2);
    frac_SSE3     = gmx_sub_pr(rs_SSE3,rf_SSE3);

    /* Load and interpolate table forces and possibly energies.
     * Force and energy can be combined in one table, stride 4: FDV0
     * or in two separate tables with stride 1: F and V
     * Currently single precision uses FDV0, double F and V.
     */
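    /* (Interpretation, not stated here: FDV0 presumably packs, per table
     * point, the force F, the difference D to the next point and the
     * energy V plus a zero pad, so a single aligned stride-4 load
     * fetches everything needed for both interpolations.)
     */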
#ifndef CALC_ENERGIES
    load_table_f(tab_coul_F,ti_SSE0,ti0,ctab0_SSE0,ctab1_SSE0);
    load_table_f(tab_coul_F,ti_SSE1,ti1,ctab0_SSE1,ctab1_SSE1);
    load_table_f(tab_coul_F,ti_SSE2,ti2,ctab0_SSE2,ctab1_SSE2);
    load_table_f(tab_coul_F,ti_SSE3,ti3,ctab0_SSE3,ctab1_SSE3);
#else
#ifndef GMX_DOUBLE
    load_table_f_v(tab_coul_F,ti_SSE0,ti0,ctab0_SSE0,ctab1_SSE0,ctabv_SSE0);
    load_table_f_v(tab_coul_F,ti_SSE1,ti1,ctab0_SSE1,ctab1_SSE1,ctabv_SSE1);
    load_table_f_v(tab_coul_F,ti_SSE2,ti2,ctab0_SSE2,ctab1_SSE2,ctabv_SSE2);
    load_table_f_v(tab_coul_F,ti_SSE3,ti3,ctab0_SSE3,ctab1_SSE3,ctabv_SSE3);
#else
    load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE0,ti0,ctab0_SSE0,ctab1_SSE0,ctabv_SSE0);
    load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE1,ti1,ctab0_SSE1,ctab1_SSE1,ctabv_SSE1);
    load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE2,ti2,ctab0_SSE2,ctab1_SSE2,ctabv_SSE2);
    load_table_f_v(tab_coul_F,tab_coul_V,ti_SSE3,ti3,ctab0_SSE3,ctab1_SSE3,ctabv_SSE3);
#endif
#endif

    fsub_SSE0     = gmx_add_pr(ctab0_SSE0,gmx_mul_pr(frac_SSE0,ctab1_SSE0));
    fsub_SSE1     = gmx_add_pr(ctab0_SSE1,gmx_mul_pr(frac_SSE1,ctab1_SSE1));
    fsub_SSE2     = gmx_add_pr(ctab0_SSE2,gmx_mul_pr(frac_SSE2,ctab1_SSE2));
    fsub_SSE3     = gmx_add_pr(ctab0_SSE3,gmx_mul_pr(frac_SSE3,ctab1_SSE3));
    frcoul_SSE0   = gmx_mul_pr(qq_SSE0,gmx_sub_pr(rinv_ex_SSE0,gmx_mul_pr(fsub_SSE0,r_SSE0)));
    frcoul_SSE1   = gmx_mul_pr(qq_SSE1,gmx_sub_pr(rinv_ex_SSE1,gmx_mul_pr(fsub_SSE1,r_SSE1)));
    frcoul_SSE2   = gmx_mul_pr(qq_SSE2,gmx_sub_pr(rinv_ex_SSE2,gmx_mul_pr(fsub_SSE2,r_SSE2)));
    frcoul_SSE3   = gmx_mul_pr(qq_SSE3,gmx_sub_pr(rinv_ex_SSE3,gmx_mul_pr(fsub_SSE3,r_SSE3)));

#ifdef CALC_ENERGIES
    vc_sub_SSE0   = gmx_add_pr(ctabv_SSE0,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE0),gmx_add_pr(ctab0_SSE0,fsub_SSE0)));
    vc_sub_SSE1   = gmx_add_pr(ctabv_SSE1,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE1),gmx_add_pr(ctab0_SSE1,fsub_SSE1)));
    vc_sub_SSE2   = gmx_add_pr(ctabv_SSE2,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE2),gmx_add_pr(ctab0_SSE2,fsub_SSE2)));
    vc_sub_SSE3   = gmx_add_pr(ctabv_SSE3,gmx_mul_pr(gmx_mul_pr(mhalfsp_SSE,frac_SSE3),gmx_add_pr(ctab0_SSE3,fsub_SSE3)));
#endif
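
/* A worked reading of the interpolation above (with mhalfsp_SSE taken
 * to be minus half the table spacing h): the tabulated correction force
 * is linear inside a cell, fsub = F(i) + frac*D(i) with D(i) the stored
 * difference to the next point, and the matching energy follows by
 * trapezoidal integration over the fractional interval,
 *   vc_sub = V(i) - 0.5*h*frac*(F(i) + fsub),
 * which keeps the interpolated energy exactly consistent with the
 * interpolated force.
 */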
#endif /* CALC_COUL_TAB */

#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
#ifndef NO_SHIFT_EWALD
    /* Add Ewald potential shift to vc_sub for convenience */
#ifdef CHECK_EXCLS
    vc_sub_SSE0   = gmx_add_pr(vc_sub_SSE0,gmx_and_pr(sh_ewald_SSE,int_SSE0));
    vc_sub_SSE1   = gmx_add_pr(vc_sub_SSE1,gmx_and_pr(sh_ewald_SSE,int_SSE1));
    vc_sub_SSE2   = gmx_add_pr(vc_sub_SSE2,gmx_and_pr(sh_ewald_SSE,int_SSE2));
    vc_sub_SSE3   = gmx_add_pr(vc_sub_SSE3,gmx_and_pr(sh_ewald_SSE,int_SSE3));
#else
    vc_sub_SSE0   = gmx_add_pr(vc_sub_SSE0,sh_ewald_SSE);
    vc_sub_SSE1   = gmx_add_pr(vc_sub_SSE1,sh_ewald_SSE);
    vc_sub_SSE2   = gmx_add_pr(vc_sub_SSE2,sh_ewald_SSE);
    vc_sub_SSE3   = gmx_add_pr(vc_sub_SSE3,sh_ewald_SSE);
#endif
#endif

    vcoul_SSE0    = gmx_mul_pr(qq_SSE0,gmx_sub_pr(rinv_ex_SSE0,vc_sub_SSE0));
    vcoul_SSE1    = gmx_mul_pr(qq_SSE1,gmx_sub_pr(rinv_ex_SSE1,vc_sub_SSE1));
    vcoul_SSE2    = gmx_mul_pr(qq_SSE2,gmx_sub_pr(rinv_ex_SSE2,vc_sub_SSE2));
    vcoul_SSE3    = gmx_mul_pr(qq_SSE3,gmx_sub_pr(rinv_ex_SSE3,vc_sub_SSE3));
#endif

#ifdef CALC_ENERGIES
    /* Mask energy for cut-off and diagonal */
    vcoul_SSE0    = gmx_and_pr(vcoul_SSE0,wco_SSE0);
    vcoul_SSE1    = gmx_and_pr(vcoul_SSE1,wco_SSE1);
    vcoul_SSE2    = gmx_and_pr(vcoul_SSE2,wco_SSE2);
    vcoul_SSE3    = gmx_and_pr(vcoul_SSE3,wco_SSE3);
#endif

#endif /* CALC_COULOMB */

#ifdef CALC_LJ
    /* Lennard-Jones interaction */

#ifdef VDW_CUTOFF_CHECK
    wco_vdw_SSE0  = gmx_cmplt_pr(rsq_SSE0,rcvdw2_SSE);
    wco_vdw_SSE1  = gmx_cmplt_pr(rsq_SSE1,rcvdw2_SSE);
#ifndef HALF_LJ
    wco_vdw_SSE2  = gmx_cmplt_pr(rsq_SSE2,rcvdw2_SSE);
    wco_vdw_SSE3  = gmx_cmplt_pr(rsq_SSE3,rcvdw2_SSE);
#endif
#else
    /* Same cut-off for Coulomb and VdW, reuse the registers */
#define wco_vdw_SSE0  wco_SSE0
#define wco_vdw_SSE1  wco_SSE1
#define wco_vdw_SSE2  wco_SSE2
#define wco_vdw_SSE3  wco_SSE3
#endif

#ifndef LJ_COMB_LB
    rinvsix_SSE0  = gmx_mul_pr(rinvsq_SSE0,gmx_mul_pr(rinvsq_SSE0,rinvsq_SSE0));
    rinvsix_SSE1  = gmx_mul_pr(rinvsq_SSE1,gmx_mul_pr(rinvsq_SSE1,rinvsq_SSE1));
#ifdef EXCL_FORCES
    rinvsix_SSE0  = gmx_and_pr(rinvsix_SSE0,int_SSE0);
    rinvsix_SSE1  = gmx_and_pr(rinvsix_SSE1,int_SSE1);
#endif
#ifndef HALF_LJ
    rinvsix_SSE2  = gmx_mul_pr(rinvsq_SSE2,gmx_mul_pr(rinvsq_SSE2,rinvsq_SSE2));
    rinvsix_SSE3  = gmx_mul_pr(rinvsq_SSE3,gmx_mul_pr(rinvsq_SSE3,rinvsq_SSE3));
#ifdef EXCL_FORCES
    rinvsix_SSE2  = gmx_and_pr(rinvsix_SSE2,int_SSE2);
    rinvsix_SSE3  = gmx_and_pr(rinvsix_SSE3,int_SSE3);
#endif
#endif
#ifdef VDW_CUTOFF_CHECK
    rinvsix_SSE0  = gmx_and_pr(rinvsix_SSE0,wco_vdw_SSE0);
    rinvsix_SSE1  = gmx_and_pr(rinvsix_SSE1,wco_vdw_SSE1);
#ifndef HALF_LJ
    rinvsix_SSE2  = gmx_and_pr(rinvsix_SSE2,wco_vdw_SSE2);
    rinvsix_SSE3  = gmx_and_pr(rinvsix_SSE3,wco_vdw_SSE3);
#endif
#endif
    FrLJ6_SSE0    = gmx_mul_pr(c6_SSE0,rinvsix_SSE0);
    FrLJ6_SSE1    = gmx_mul_pr(c6_SSE1,rinvsix_SSE1);
#ifndef HALF_LJ
    FrLJ6_SSE2    = gmx_mul_pr(c6_SSE2,rinvsix_SSE2);
    FrLJ6_SSE3    = gmx_mul_pr(c6_SSE3,rinvsix_SSE3);
#endif
    FrLJ12_SSE0   = gmx_mul_pr(c12_SSE0,gmx_mul_pr(rinvsix_SSE0,rinvsix_SSE0));
    FrLJ12_SSE1   = gmx_mul_pr(c12_SSE1,gmx_mul_pr(rinvsix_SSE1,rinvsix_SSE1));
#ifndef HALF_LJ
    FrLJ12_SSE2   = gmx_mul_pr(c12_SSE2,gmx_mul_pr(rinvsix_SSE2,rinvsix_SSE2));
    FrLJ12_SSE3   = gmx_mul_pr(c12_SSE3,gmx_mul_pr(rinvsix_SSE3,rinvsix_SSE3));
#endif
#endif /* not LJ_COMB_LB */
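
/* Note on conventions (an inference, consistent with the 1/6 and 1/12
 * factors in the energy code below): the per-pair c6/c12 apparently
 * carry the force prefactors 6*C6 and 12*C12, so FrLJ6 and FrLJ12 are
 * directly the force-times-r contributions,
 *   F(r)*r = 12*C12/r^12 - 6*C6/r^6,
 * and the plain energies are recovered as FrLJ6/6 and FrLJ12/12.
 */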

#ifdef LJ_COMB_LB
    sir_SSE0      = gmx_mul_pr(sig_SSE0,rinv_SSE0);
    sir_SSE1      = gmx_mul_pr(sig_SSE1,rinv_SSE1);
#ifndef HALF_LJ
    sir_SSE2      = gmx_mul_pr(sig_SSE2,rinv_SSE2);
    sir_SSE3      = gmx_mul_pr(sig_SSE3,rinv_SSE3);
#endif
    sir2_SSE0     = gmx_mul_pr(sir_SSE0,sir_SSE0);
    sir2_SSE1     = gmx_mul_pr(sir_SSE1,sir_SSE1);
#ifndef HALF_LJ
    sir2_SSE2     = gmx_mul_pr(sir_SSE2,sir_SSE2);
    sir2_SSE3     = gmx_mul_pr(sir_SSE3,sir_SSE3);
#endif
    sir6_SSE0     = gmx_mul_pr(sir2_SSE0,gmx_mul_pr(sir2_SSE0,sir2_SSE0));
    sir6_SSE1     = gmx_mul_pr(sir2_SSE1,gmx_mul_pr(sir2_SSE1,sir2_SSE1));
#ifdef EXCL_FORCES
    sir6_SSE0     = gmx_and_pr(sir6_SSE0,int_SSE0);
    sir6_SSE1     = gmx_and_pr(sir6_SSE1,int_SSE1);
#endif
#ifndef HALF_LJ
    sir6_SSE2     = gmx_mul_pr(sir2_SSE2,gmx_mul_pr(sir2_SSE2,sir2_SSE2));
    sir6_SSE3     = gmx_mul_pr(sir2_SSE3,gmx_mul_pr(sir2_SSE3,sir2_SSE3));
#ifdef EXCL_FORCES
    sir6_SSE2     = gmx_and_pr(sir6_SSE2,int_SSE2);
    sir6_SSE3     = gmx_and_pr(sir6_SSE3,int_SSE3);
#endif
#endif
#ifdef VDW_CUTOFF_CHECK
    sir6_SSE0     = gmx_and_pr(sir6_SSE0,wco_vdw_SSE0);
    sir6_SSE1     = gmx_and_pr(sir6_SSE1,wco_vdw_SSE1);
#ifndef HALF_LJ
    sir6_SSE2     = gmx_and_pr(sir6_SSE2,wco_vdw_SSE2);
    sir6_SSE3     = gmx_and_pr(sir6_SSE3,wco_vdw_SSE3);
#endif
#endif
    FrLJ6_SSE0    = gmx_mul_pr(eps_SSE0,sir6_SSE0);
    FrLJ6_SSE1    = gmx_mul_pr(eps_SSE1,sir6_SSE1);
#ifndef HALF_LJ
    FrLJ6_SSE2    = gmx_mul_pr(eps_SSE2,sir6_SSE2);
    FrLJ6_SSE3    = gmx_mul_pr(eps_SSE3,sir6_SSE3);
#endif
    FrLJ12_SSE0   = gmx_mul_pr(FrLJ6_SSE0,sir6_SSE0);
    FrLJ12_SSE1   = gmx_mul_pr(FrLJ6_SSE1,sir6_SSE1);
#ifndef HALF_LJ
    FrLJ12_SSE2   = gmx_mul_pr(FrLJ6_SSE2,sir6_SSE2);
    FrLJ12_SSE3   = gmx_mul_pr(FrLJ6_SSE3,sir6_SSE3);
#endif

#if defined CALC_ENERGIES
    /* We need C6 and C12 to calculate the LJ potential shift */
    sig2_SSE0     = gmx_mul_pr(sig_SSE0,sig_SSE0);
    sig2_SSE1     = gmx_mul_pr(sig_SSE1,sig_SSE1);
#ifndef HALF_LJ
    sig2_SSE2     = gmx_mul_pr(sig_SSE2,sig_SSE2);
    sig2_SSE3     = gmx_mul_pr(sig_SSE3,sig_SSE3);
#endif
    sig6_SSE0     = gmx_mul_pr(sig2_SSE0,gmx_mul_pr(sig2_SSE0,sig2_SSE0));
    sig6_SSE1     = gmx_mul_pr(sig2_SSE1,gmx_mul_pr(sig2_SSE1,sig2_SSE1));
#ifndef HALF_LJ
    sig6_SSE2     = gmx_mul_pr(sig2_SSE2,gmx_mul_pr(sig2_SSE2,sig2_SSE2));
    sig6_SSE3     = gmx_mul_pr(sig2_SSE3,gmx_mul_pr(sig2_SSE3,sig2_SSE3));
#endif
    c6_SSE0       = gmx_mul_pr(eps_SSE0,sig6_SSE0);
    c6_SSE1       = gmx_mul_pr(eps_SSE1,sig6_SSE1);
#ifndef HALF_LJ
    c6_SSE2       = gmx_mul_pr(eps_SSE2,sig6_SSE2);
    c6_SSE3       = gmx_mul_pr(eps_SSE3,sig6_SSE3);
#endif
    c12_SSE0      = gmx_mul_pr(c6_SSE0,sig6_SSE0);
    c12_SSE1      = gmx_mul_pr(c6_SSE1,sig6_SSE1);
#ifndef HALF_LJ
    c12_SSE2      = gmx_mul_pr(c6_SSE2,sig6_SSE2);
    c12_SSE3      = gmx_mul_pr(c6_SSE3,sig6_SSE3);
#endif
#endif /* CALC_ENERGIES */
#endif /* LJ_COMB_LB */
#endif /* CALC_LJ */
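
/* In this LB branch C6 = eps*sigma^6 and C12 = eps*sigma^12 = C6*sigma^6;
 * any conventional prefactors (the 4 of the standard LJ form and the
 * 6/12 force factors used elsewhere in this file) are presumably folded
 * into the stored half-sigma and sqrt-epsilon scaling, so the pair
 * parameters are only reconstructed here when the shifted potential is
 * actually needed.
 */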

#ifdef CALC_ENERGIES
#ifdef ENERGY_GROUPS
    /* Extract the group pair index per j pair.
     * Energy groups are stored per i-cluster, so things get
     * complicated when the i- and j-cluster size don't match.
     */
#if UNROLLJ < UNROLLI
    egps_j        = nbat->energrp[cj>>1];
    egp_jj[0]     = ((egps_j >> ((cj & 1)*egps_jshift)) & egps_jmask)*egps_jstride;
#else
    /* We assume UNROLLI <= UNROLLJ */
    {
        int jdi;

        for(jdi=0; jdi<UNROLLJ/UNROLLI; jdi++)
        {
            int jj;

            egps_j = nbat->energrp[cj*(UNROLLJ/UNROLLI)+jdi];
            for(jj=0; jj<(UNROLLI/2); jj++)
            {
                egp_jj[jdi*(UNROLLI/2)+jj] = ((egps_j >> (jj*egps_jshift)) & egps_jmask)*egps_jstride;
            }
        }
    }
#endif
#endif

#ifdef CALC_COULOMB
#ifndef ENERGY_GROUPS
    vctotSSE      = gmx_add_pr(vctotSSE, gmx_sum4_pr(vcoul_SSE0,vcoul_SSE1,vcoul_SSE2,vcoul_SSE3));
#else
    add_ener_grp(vcoul_SSE0,vctp[0],egp_jj);
    add_ener_grp(vcoul_SSE1,vctp[1],egp_jj);
    add_ener_grp(vcoul_SSE2,vctp[2],egp_jj);
    add_ener_grp(vcoul_SSE3,vctp[3],egp_jj);
#endif
#endif

#ifdef CALC_LJ
    /* Calculate the LJ energies */
    VLJ6_SSE0     = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE0,gmx_mul_pr(c6_SSE0,sh_invrc6_SSE)));
    VLJ6_SSE1     = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE1,gmx_mul_pr(c6_SSE1,sh_invrc6_SSE)));
#ifndef HALF_LJ
    VLJ6_SSE2     = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE2,gmx_mul_pr(c6_SSE2,sh_invrc6_SSE)));
    VLJ6_SSE3     = gmx_mul_pr(sixthSSE,gmx_sub_pr(FrLJ6_SSE3,gmx_mul_pr(c6_SSE3,sh_invrc6_SSE)));
#endif
    VLJ12_SSE0    = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE0,gmx_mul_pr(c12_SSE0,sh_invrc12_SSE)));
    VLJ12_SSE1    = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE1,gmx_mul_pr(c12_SSE1,sh_invrc12_SSE)));
#ifndef HALF_LJ
    VLJ12_SSE2    = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE2,gmx_mul_pr(c12_SSE2,sh_invrc12_SSE)));
    VLJ12_SSE3    = gmx_mul_pr(twelvethSSE,gmx_sub_pr(FrLJ12_SSE3,gmx_mul_pr(c12_SSE3,sh_invrc12_SSE)));
#endif
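
/* The shift terms use sh_invrc6_SSE and sh_invrc12_SSE, which by their
 * use here should hold rc^-6 and rc^-12, so each pair energy becomes
 * V(r) - V(rc) and the LJ potential vanishes exactly at the cut-off.
 */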

    VLJ_SSE0      = gmx_sub_pr(VLJ12_SSE0,VLJ6_SSE0);
    VLJ_SSE1      = gmx_sub_pr(VLJ12_SSE1,VLJ6_SSE1);
#ifndef HALF_LJ
    VLJ_SSE2      = gmx_sub_pr(VLJ12_SSE2,VLJ6_SSE2);
    VLJ_SSE3      = gmx_sub_pr(VLJ12_SSE3,VLJ6_SSE3);
#endif
    /* The potential shift should be removed for pairs beyond cut-off */
    VLJ_SSE0      = gmx_and_pr(VLJ_SSE0,wco_vdw_SSE0);
    VLJ_SSE1      = gmx_and_pr(VLJ_SSE1,wco_vdw_SSE1);
#ifndef HALF_LJ
    VLJ_SSE2      = gmx_and_pr(VLJ_SSE2,wco_vdw_SSE2);
    VLJ_SSE3      = gmx_and_pr(VLJ_SSE3,wco_vdw_SSE3);
#endif
#ifdef CHECK_EXCLS
    /* The potential shift should be removed for excluded pairs */
    VLJ_SSE0      = gmx_and_pr(VLJ_SSE0,int_SSE0);
    VLJ_SSE1      = gmx_and_pr(VLJ_SSE1,int_SSE1);
#ifndef HALF_LJ
    VLJ_SSE2      = gmx_and_pr(VLJ_SSE2,int_SSE2);
    VLJ_SSE3      = gmx_and_pr(VLJ_SSE3,int_SSE3);
#endif
#endif

#ifndef ENERGY_GROUPS
    VvdwtotSSE    = gmx_add_pr(VvdwtotSSE,
#ifndef HALF_LJ
                               gmx_sum4_pr(VLJ_SSE0,VLJ_SSE1,VLJ_SSE2,VLJ_SSE3)
#else
                               gmx_add_pr(VLJ_SSE0,VLJ_SSE1)
#endif
                              );
#else
    add_ener_grp(VLJ_SSE0,vvdwtp[0],egp_jj);
    add_ener_grp(VLJ_SSE1,vvdwtp[1],egp_jj);
#ifndef HALF_LJ
    add_ener_grp(VLJ_SSE2,vvdwtp[2],egp_jj);
    add_ener_grp(VLJ_SSE3,vvdwtp[3],egp_jj);
#endif
#endif
#endif /* CALC_LJ */
#endif /* CALC_ENERGIES */

#ifdef CALC_LJ
    fscal_SSE0    = gmx_mul_pr(rinvsq_SSE0,
                               gmx_add_pr(frcoul_SSE0,
                                          gmx_sub_pr(FrLJ12_SSE0,FrLJ6_SSE0)));
    fscal_SSE1    = gmx_mul_pr(rinvsq_SSE1,
                               gmx_add_pr(frcoul_SSE1,
                                          gmx_sub_pr(FrLJ12_SSE1,FrLJ6_SSE1)));
#else
    fscal_SSE0    = gmx_mul_pr(rinvsq_SSE0,frcoul_SSE0);
    fscal_SSE1    = gmx_mul_pr(rinvsq_SSE1,frcoul_SSE1);
#endif /* CALC_LJ */
#if defined CALC_LJ && !defined HALF_LJ
    fscal_SSE2    = gmx_mul_pr(rinvsq_SSE2,
                               gmx_add_pr(frcoul_SSE2,
                                          gmx_sub_pr(FrLJ12_SSE2,FrLJ6_SSE2)));
    fscal_SSE3    = gmx_mul_pr(rinvsq_SSE3,
                               gmx_add_pr(frcoul_SSE3,
                                          gmx_sub_pr(FrLJ12_SSE3,FrLJ6_SSE3)));
#else
    /* Atoms 2 and 3 don't have LJ, so we only add Coulomb forces */
    fscal_SSE2    = gmx_mul_pr(rinvsq_SSE2,frcoul_SSE2);
    fscal_SSE3    = gmx_mul_pr(rinvsq_SSE3,frcoul_SSE3);
#endif
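
/* fscal is (force*r)/r^2 = force/r: multiplying it by the raw coordinate
 * differences dx,dy,dz below therefore yields the Cartesian force
 * components without ever forming a normalised direction vector.
 */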

    /* Calculate temporary vectorial force */
    tx_SSE0       = gmx_mul_pr(fscal_SSE0,dx_SSE0);
    tx_SSE1       = gmx_mul_pr(fscal_SSE1,dx_SSE1);
    tx_SSE2       = gmx_mul_pr(fscal_SSE2,dx_SSE2);
    tx_SSE3       = gmx_mul_pr(fscal_SSE3,dx_SSE3);
    ty_SSE0       = gmx_mul_pr(fscal_SSE0,dy_SSE0);
    ty_SSE1       = gmx_mul_pr(fscal_SSE1,dy_SSE1);
    ty_SSE2       = gmx_mul_pr(fscal_SSE2,dy_SSE2);
    ty_SSE3       = gmx_mul_pr(fscal_SSE3,dy_SSE3);
    tz_SSE0       = gmx_mul_pr(fscal_SSE0,dz_SSE0);
    tz_SSE1       = gmx_mul_pr(fscal_SSE1,dz_SSE1);
    tz_SSE2       = gmx_mul_pr(fscal_SSE2,dz_SSE2);
    tz_SSE3       = gmx_mul_pr(fscal_SSE3,dz_SSE3);

    /* Increment i atom force */
    fix_SSE0      = gmx_add_pr(fix_SSE0,tx_SSE0);
    fix_SSE1      = gmx_add_pr(fix_SSE1,tx_SSE1);
    fix_SSE2      = gmx_add_pr(fix_SSE2,tx_SSE2);
    fix_SSE3      = gmx_add_pr(fix_SSE3,tx_SSE3);
    fiy_SSE0      = gmx_add_pr(fiy_SSE0,ty_SSE0);
    fiy_SSE1      = gmx_add_pr(fiy_SSE1,ty_SSE1);
    fiy_SSE2      = gmx_add_pr(fiy_SSE2,ty_SSE2);
    fiy_SSE3      = gmx_add_pr(fiy_SSE3,ty_SSE3);
    fiz_SSE0      = gmx_add_pr(fiz_SSE0,tz_SSE0);
    fiz_SSE1      = gmx_add_pr(fiz_SSE1,tz_SSE1);
    fiz_SSE2      = gmx_add_pr(fiz_SSE2,tz_SSE2);
    fiz_SSE3      = gmx_add_pr(fiz_SSE3,tz_SSE3);
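
/* Design note (from the structure of this loop): the i-cluster forces
 * stay in the fix/fiy/fiz register accumulators across all j iterations
 * and are only reduced to memory after the loop, while the j forces
 * below must be updated in memory every iteration because each j
 * cluster is visited once per i cluster.
 */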

    /* Decrement j atom force */
    gmx_store_pr(f+ajx,
                 gmx_sub_pr( gmx_load_pr(f+ajx), gmx_sum4_pr(tx_SSE0,tx_SSE1,tx_SSE2,tx_SSE3) ));
    gmx_store_pr(f+ajy,
                 gmx_sub_pr( gmx_load_pr(f+ajy), gmx_sum4_pr(ty_SSE0,ty_SSE1,ty_SSE2,ty_SSE3) ));
    gmx_store_pr(f+ajz,
                 gmx_sub_pr( gmx_load_pr(f+ajz), gmx_sum4_pr(tz_SSE0,tz_SSE1,tz_SSE2,tz_SSE3) ));
}

#undef  rinv_ex_SSE0
#undef  rinv_ex_SSE1
#undef  rinv_ex_SSE2
#undef  rinv_ex_SSE3

#undef  wco_vdw_SSE0
#undef  wco_vdw_SSE1
#undef  wco_vdw_SSE2
#undef  wco_vdw_SSE3

#undef  CUTOFF_BLENDV
#undef  EXCL_FORCES